pci-imx6.c

/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	enum imx6_pcie_variants variant;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR			(PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
#define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC		0
#define PCIE_PHY_CTRL_CAP_ADR_LOC	16
#define PCIE_PHY_CTRL_CAP_DAT_LOC	17
#define PCIE_PHY_CTRL_WR_LOC		18
#define PCIE_PHY_CTRL_RD_LOC		19

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC		16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT		0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	(1 << 3)
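
/*
 * The PHY registers above are reached through an indirect interface:
 * the address and data are shifted onto PCIE_PHY_CTRL together with
 * capture/read/write strobe bits, and each strobe is acknowledged via
 * the ack bit in PCIE_PHY_STAT.  The helpers below implement that
 * handshake; this comment describes the protocol as implemented here,
 * not a datasheet-level specification.
 */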
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write and latch (cap) the address */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
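
/*
 * Force the PHY receiver offline by overriding its RX_DATA_EN and
 * RX_PLL_EN inputs, then release the overrides after a short delay so
 * the receiver retrains.  Used as a best-effort recovery path when
 * link bring-up fails.
 */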
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

/*
 * PCI abort handler: config-space accesses to absent devices trigger
 * imprecise external aborts on i.MX6.  Returning 0 marks the fault as
 * handled so the access simply completes; the handler is registered in
 * imx6_pcie_init() below.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}
}
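
/*
 * Enable the PCIe reference clock for the given variant.  On i.MX6SX
 * this also requires the inbound AXI clock; on i.MX6Q/QP the core PHY
 * is powered up and the ref clock gate in GPR1 is opened; on i.MX7D
 * nothing extra is needed in this hook.
 */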
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to
		 * synchronize internally.  If the ref clock only comes
		 * up after reset, the internally synchronized reset
		 * window is too short to meet the requirement, so add
		 * a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	unsigned int retries;
	struct device *dev = imx6_pcie->pci->dev;

	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES;
	     retries++) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);

		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}
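
/*
 * Bring the core out of reset: enable the PHY, bus and core clocks,
 * let them stabilize, pulse the optional board-level reset GPIO, and
 * finally release the variant-specific core/PHY resets.
 */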
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		return;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
}
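
/*
 * One-time PHY/controller configuration through the IOMUXC GPR
 * registers: select the reference clock source, program the LOS level
 * and the DT-provided TX de-emphasis/swing values, and mark the core
 * as a Root Complex.
 */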
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct imx6_pcie *imx6_pcie = arg;
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;

	return dw_handle_msi_irq(pp);
}
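
/*
 * Train the link: cap the link to Gen1 for the initial bring-up, kick
 * the LTSSM, and once the link is up optionally raise the cap to Gen2
 * and trigger a directed speed change.  On failure the PHY is reset as
 * a recovery measure.
 */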
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves
			 * differently from the i.MX6 family when no
			 * link speed transition occurs (the link comes
			 * up in Gen1 and stays there): the bit is not
			 * cleared by hardware, so waiting for it would
			 * report a false failure.  Only wait on i.MX6.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

static void imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}
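
/*
 * Report link-up from the PHY debug register; the DesignWare core
 * calls this, e.g. from dw_pcie_wait_for_link().
 */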
static int imx6_pcie_link_up(struct dw_pcie *pci)
{
	return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};
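
/*
 * Wire up the host bridge: request the "msi" interrupt when MSI
 * support is enabled, then hand the port to the DesignWare core for
 * enumeration.
 */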
static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "mx6-pcie-msi", imx6_pcie);
		if (ret) {
			dev_err(dev, "failed to request MSI irq\n");
			return ret;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = imx6_pcie_link_up,
};
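
/*
 * Probe: allocate driver state, pick the SoC variant from the OF match
 * data, map the DBI registers, and gather the reset GPIO, clocks,
 * variant-specific resets, the IOMUXC GPR regmap and the PHY tuning
 * properties from the device tree before registering the port.
 */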
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
								  "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}
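
/*
 * Illustrative (not board-specific) device tree node consumed by this
 * driver; the unit address and values below are examples only, the
 * property and clock names match what probe() looks up:
 *
 *	pcie: pcie@01ffc000 {
 *		compatible = "fsl,imx6q-pcie";
 *		reset-gpio = <&gpio7 12 0>;
 *		clock-names = "pcie", "pcie_bus", "pcie_phy";
 *		interrupt-names = "msi";
 *		fsl,max-link-speed = <2>;
 *	};
 */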

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* Bring down the link so the bootloader sees a clean state on reboot. */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
	},
	.probe = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred, we need to make sure that
	 * hook_fault_code() is not called after __init memory is freed
	 * by the kernel.  Since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it accessing
	 * some uninitialized driver state.
	 */
	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
			"imprecise external abort");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);