pci-imx6.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892
  1. /*
  2. * PCIe host controller driver for Freescale i.MX6 SoCs
  3. *
  4. * Copyright (C) 2013 Kosagi
  5. * http://www.kosagi.com
  6. *
  7. * Author: Sean Cross <xobs@kosagi.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/gpio.h>
  16. #include <linux/kernel.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  19. #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
  20. #include <linux/module.h>
  21. #include <linux/of_gpio.h>
  22. #include <linux/of_device.h>
  23. #include <linux/pci.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/regmap.h>
  26. #include <linux/regulator/consumer.h>
  27. #include <linux/resource.h>
  28. #include <linux/signal.h>
  29. #include <linux/types.h>
  30. #include <linux/interrupt.h>
  31. #include <linux/reset.h>
  32. #include "pcie-designware.h"
/* Recover the driver-private state stashed as the dw_pcie's drvdata */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

/* Supported SoC families; selected via of_device_get_match_data() in probe */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};
/* Per-controller driver state */
struct imx6_pcie {
	struct dw_pcie *pci;		/* DesignWare core handle */
	int reset_gpio;			/* PERST# GPIO, or -errno when absent */
	bool gpio_active_high;		/* "reset-gpio-active-high" DT flag */
	struct clk *pcie_bus;
	struct clk *pcie_phy;
	struct clk *pcie_inbound_axi;	/* i.MX6SX only */
	struct clk *pcie;
	struct regmap *iomuxc_gpr;	/* IOMUXC GPR syscon regmap */
	struct reset_control *pciephy_reset;	/* i.MX7D only */
	struct reset_control *apps_reset;	/* i.MX7D only */
	enum imx6_pcie_variants variant;
	/* PHY Tx tuning values from DT, programmed into GPR8 */
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	int link_gen;			/* max link speed from DT; defaults to 1 */
	struct regulator *vpcie;	/* optional supply; NULL when not in DT */
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

/* Bit positions in PCIE_PHY_CTRL/STAT used for the indirect PHY handshake */
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/*
 * PHY registers (not memory-mapped); accessed indirectly through
 * pcie_phy_read()/pcie_phy_write() below.
 */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
  95. static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
  96. {
  97. struct dw_pcie *pci = imx6_pcie->pci;
  98. u32 val;
  99. u32 max_iterations = 10;
  100. u32 wait_counter = 0;
  101. do {
  102. val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
  103. val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
  104. wait_counter++;
  105. if (val == exp_val)
  106. return 0;
  107. udelay(1);
  108. } while (wait_counter < max_iterations);
  109. return -ETIMEDOUT;
  110. }
  111. static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
  112. {
  113. struct dw_pcie *pci = imx6_pcie->pci;
  114. u32 val;
  115. int ret;
  116. val = addr << PCIE_PHY_CTRL_DATA_LOC;
  117. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  118. val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
  119. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  120. ret = pcie_phy_poll_ack(imx6_pcie, 1);
  121. if (ret)
  122. return ret;
  123. val = addr << PCIE_PHY_CTRL_DATA_LOC;
  124. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  125. return pcie_phy_poll_ack(imx6_pcie, 0);
  126. }
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	/* latch the register address (capture-address handshake) */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	/* wait for the PHY to ack the read */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* the low 16 bits of the status register hold the read data */
	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	/* wait for ack de-assertion to finish the handshake */
	return pcie_phy_poll_ack(imx6_pcie, 0);
}
/*
 * Write @data to a 16-bit PCIe PHY control register (not memory-mapped).
 * Each phase of the handshake (capture data, write strobe) is acked by
 * the PHY; any ack timeout aborts the sequence with a negative errno.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* present the data on the PHY control bus */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* release the control bus */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
  190. static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
  191. {
  192. u32 tmp;
  193. pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
  194. tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
  195. PHY_RX_OVRD_IN_LO_RX_PLL_EN);
  196. pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
  197. usleep_range(2000, 3000);
  198. pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
  199. tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
  200. PHY_RX_OVRD_IN_LO_RX_PLL_EN);
  201. pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
  202. }
/* Added for PCI abort handling */
/*
 * Fault handler for external aborts triggered by config-space reads
 * when no link is up (installed via hook_fault_code() in
 * imx6_pcie_init()).  Decodes the faulting instruction; for loads, the
 * destination register is faked to all-ones (0xff for byte loads) and
 * the PC advanced past the instruction.  Returns 0 when handled, 1 to
 * fall back to the kernel's default abort handling.
 *
 * NOTE(review): the *(unsigned long *)pc fetch and the masks below
 * assume a 4-byte ARM (non-Thumb) instruction encoding -- confirm the
 * kernel here never runs this path in Thumb mode.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;	/* destination register field */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		/* single data transfer load */
		unsigned long val;

		if (instr & 0x00400000)		/* byte access */
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;		/* skip faulting instruction */
		return 0;
	}

	/* "extra" load/store encoding (e.g. halfword loads) */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
/*
 * Put the PCIe core/PHY into reset using the variant-specific mechanism
 * and switch off the optional vpcie supply.  Called at the start of
 * host init and from shutdown.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		/* dedicated reset controller lines on i.MX7D */
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* power down the test PD bit and gate the ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	/* drop the supply only if it is currently enabled */
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
  267. static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
  268. {
  269. struct dw_pcie *pci = imx6_pcie->pci;
  270. struct device *dev = pci->dev;
  271. int ret = 0;
  272. switch (imx6_pcie->variant) {
  273. case IMX6SX:
  274. ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
  275. if (ret) {
  276. dev_err(dev, "unable to enable pcie_axi clock\n");
  277. break;
  278. }
  279. regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
  280. IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
  281. break;
  282. case IMX6QP: /* FALLTHROUGH */
  283. case IMX6Q:
  284. /* power up core phy and enable ref clock */
  285. regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
  286. IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
  287. /*
  288. * the async reset input need ref clock to sync internally,
  289. * when the ref clock comes after reset, internal synced
  290. * reset time is too short, cannot meet the requirement.
  291. * add one ~10us delay here.
  292. */
  293. udelay(10);
  294. regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
  295. IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
  296. break;
  297. case IMX7D:
  298. break;
  299. }
  300. return ret;
  301. }
  302. static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
  303. {
  304. u32 val;
  305. unsigned int retries;
  306. struct device *dev = imx6_pcie->pci->dev;
  307. for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
  308. regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
  309. if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
  310. return;
  311. usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
  312. PHY_PLL_LOCK_WAIT_USLEEP_MAX);
  313. }
  314. dev_err(dev, "PCIe PLL lock timeout\n");
  315. }
/*
 * Power up the PCIe core and release it from reset: enable the optional
 * vpcie supply, turn on the phy/bus/core/ref clocks in order, toggle
 * the PERST# GPIO (when present) and release the variant-specific reset
 * bits.  On a clock failure the previously enabled clocks and the
 * regulator are unwound via the goto chain at the bottom.
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* enable the supply only if it is not already on */
	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* assert PERST#, hold it 100ms, then release */
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	/* release the variant-specific reset asserted earlier */
	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

	/* unwind in reverse order of acquisition */
err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
/*
 * Program the IOMUXC GPR bits that configure the PCIe PHY.  Note the
 * deliberate fallthrough: IMX6SX applies its RX EQ setting and then
 * also runs the default (i.MX6 family) GPR12/GPR8 programming, while
 * IMX7D only selects the PHY refclk source.  Finally the controller is
 * put into Root Complex mode for all variants.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		/* select the internal PHY refclk source */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		/* keep LTSSM disabled until imx6_pcie_establish_link() */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		/* Tx de-emphasis and swing values from DT (see probe) */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	/* all variants operate as a Root Complex */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
  429. static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
  430. {
  431. struct dw_pcie *pci = imx6_pcie->pci;
  432. struct device *dev = pci->dev;
  433. /* check if the link is up or not */
  434. if (!dw_pcie_wait_for_link(pci))
  435. return 0;
  436. dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
  437. dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
  438. dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
  439. return -ETIMEDOUT;
  440. }
  441. static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
  442. {
  443. struct dw_pcie *pci = imx6_pcie->pci;
  444. struct device *dev = pci->dev;
  445. u32 tmp;
  446. unsigned int retries;
  447. for (retries = 0; retries < 200; retries++) {
  448. tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
  449. /* Test if the speed change finished. */
  450. if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
  451. return 0;
  452. usleep_range(100, 1000);
  453. }
  454. dev_err(dev, "Speed change timeout\n");
  455. return -EINVAL;
  456. }
  457. static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
  458. {
  459. struct imx6_pcie *imx6_pcie = arg;
  460. struct dw_pcie *pci = imx6_pcie->pci;
  461. struct pcie_port *pp = &pci->pp;
  462. return dw_handle_msi_irq(pp);
  463. }
/*
 * Bring up the PCIe link: force Gen1 for initial training, start the
 * LTSSM, then (when link_gen == 2) raise the speed cap and trigger a
 * directed speed change.  Returns 0 on link up; on failure the PHY is
 * reset and a negative errno returned.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* report the speed negotiated by the link partners */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}
/*
 * dw_pcie_host_ops .host_init hook.  The steps run in a strict order:
 * put the core in reset, program the PHY, power up and release the
 * reset, set up the root complex, then train the link.
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	/* link failure is logged inside; host init proceeds regardless */
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}
  548. static int imx6_pcie_link_up(struct dw_pcie *pci)
  549. {
  550. return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
  551. PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
  552. }
/* Host callbacks registered with the DesignWare core */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};
  556. static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
  557. struct platform_device *pdev)
  558. {
  559. struct dw_pcie *pci = imx6_pcie->pci;
  560. struct pcie_port *pp = &pci->pp;
  561. struct device *dev = &pdev->dev;
  562. int ret;
  563. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  564. pp->msi_irq = platform_get_irq_byname(pdev, "msi");
  565. if (pp->msi_irq <= 0) {
  566. dev_err(dev, "failed to get MSI irq\n");
  567. return -ENODEV;
  568. }
  569. ret = devm_request_irq(dev, pp->msi_irq,
  570. imx6_pcie_msi_handler,
  571. IRQF_SHARED | IRQF_NO_THREAD,
  572. "mx6-pcie-msi", imx6_pcie);
  573. if (ret) {
  574. dev_err(dev, "failed to request MSI irq\n");
  575. return ret;
  576. }
  577. }
  578. pp->root_bus_nr = -1;
  579. pp->ops = &imx6_pcie_host_ops;
  580. ret = dw_pcie_host_init(pp);
  581. if (ret) {
  582. dev_err(dev, "failed to initialize host\n");
  583. return ret;
  584. }
  585. return 0;
  586. }
/* Core callbacks consumed by the DesignWare PCIe library */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = imx6_pcie_link_up,
};
/*
 * Probe: allocate driver state, map the DBI register space, fetch the
 * optional PERST# GPIO, the clocks, the i.MX7D reset lines, the IOMUXC
 * GPR syscon and the optional DT tuning properties, then register the
 * host bridge.  All resources are devm-managed, so error paths simply
 * return.
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	/* the SoC variant is encoded in the of_device_id .data pointer */
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* start with the endpoint held in reset */
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		/* GPIO controller not ready yet; retry probe later */
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* variant-specific resources */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings (fall back to defaults when absent) */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	/* vpcie is optional: anything but probe-defer means "not present" */
	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}
/* .shutdown hook: quiesce the controller before reboot/poweroff */
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}
/* DT match table; .data carries the enum imx6_pcie_variants value */
static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};
static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		/* unbinding is not supported; disallow manual unbind */
		.suppress_bind_attrs = true,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};
/* Driver entry point: install the abort hook, then register the driver */
static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);