pcie-qcom.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. /*
  2. * Qualcomm PCIe root complex driver
  3. *
  4. * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  5. * Copyright 2015 Linaro Limited.
  6. *
  7. * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 and
  11. * only version 2 as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. */
  18. #include <linux/clk.h>
  19. #include <linux/delay.h>
  20. #include <linux/gpio.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/io.h>
  23. #include <linux/iopoll.h>
  24. #include <linux/kernel.h>
  25. #include <linux/init.h>
  26. #include <linux/of_device.h>
  27. #include <linux/of_gpio.h>
  28. #include <linux/pci.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/phy/phy.h>
  31. #include <linux/regulator/consumer.h>
  32. #include <linux/reset.h>
  33. #include <linux/slab.h>
  34. #include <linux/types.h>
  35. #include "pcie-designware.h"
/* PARF (PCIe wrapper/auxiliary register file) register offsets */
#define PCIE20_PARF_SYS_CTRL			0x00
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
/* NOTE(review): the two offsets below are not referenced in this file */
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

/* Offset of the PCIe capability structure in config space */
#define PCIE20_CAP				0x70

/* Settle delay (us) after toggling the PERST# GPIO */
#define PERST_DELAY_US				1000
/*
 * Clocks, resets and regulators used by the v0 controllers
 * (ipq8064/apq8064 per the match table below).
 */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};
/* Clocks, reset and regulator used by the v1 controller (apq8084). */
struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
/* Clocks used by the v2 controller (msm8996); no explicit resets. */
struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;	/* sourced from the PHY; enabled in post_init */
};
/* Only one variant is ever active, selected by the matched ops table. */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};
struct qcom_pcie;

/* Per-SoC-generation hooks; post_init and ltssm_enable are optional. */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* acquire clk/reset/regulator handles */
	int (*init)(struct qcom_pcie *pcie);		/* power up and take out of reset */
	int (*post_init)(struct qcom_pcie *pcie);	/* steps required after PHY power-on */
	void (*deinit)(struct qcom_pcie *pcie);		/* undo init() */
	void (*ltssm_enable)(struct qcom_pcie *pcie);	/* start link training */
};
/* Driver state; lifetime is tied to the platform device via devm. */
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;		/* optional PERST# GPIO */
	struct qcom_pcie_ops *ops;		/* selected from the OF match data */
};

/* drvdata is set to the struct qcom_pcie in probe */
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
/* Drive PERST# active (GPIO is optional; a NULL desc is a no-op). */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	/* give the endpoint time to see the reset */
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
/* Release PERST# and wait for the endpoint to come out of reset. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
  112. static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
  113. {
  114. struct pcie_port *pp = arg;
  115. return dw_handle_msi_irq(pp);
  116. }
  117. static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
  118. {
  119. u32 val;
  120. /* enable link training */
  121. val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  122. val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
  123. writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  124. }
  125. static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
  126. {
  127. u32 val;
  128. /* enable link training */
  129. val = readl(pcie->parf + PCIE20_PARF_LTSSM);
  130. val |= BIT(8);
  131. writel(val, pcie->parf + PCIE20_PARF_LTSSM);
  132. }
  133. static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
  134. {
  135. struct dw_pcie *pci = pcie->pci;
  136. if (dw_pcie_link_up(pci))
  137. return 0;
  138. /* Enable Link Training state machine */
  139. if (pcie->ops->ltssm_enable)
  140. pcie->ops->ltssm_enable(pcie);
  141. return dw_pcie_wait_for_link(pci);
  142. }
/*
 * Acquire all v0 regulators, clocks and resets. Everything is devm-managed,
 * so there is no paired release path; the first failure aborts probing.
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
/*
 * Acquire the v1 regulator, clocks and core reset; all devm-managed.
 * Returns 0 on success or the first PTR_ERR encountered.
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
  204. static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
  205. {
  206. struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
  207. reset_control_assert(res->pci_reset);
  208. reset_control_assert(res->axi_reset);
  209. reset_control_assert(res->ahb_reset);
  210. reset_control_assert(res->por_reset);
  211. reset_control_assert(res->pci_reset);
  212. clk_disable_unprepare(res->iface_clk);
  213. clk_disable_unprepare(res->core_clk);
  214. clk_disable_unprepare(res->phy_clk);
  215. regulator_disable(res->vdda);
  216. regulator_disable(res->vdda_phy);
  217. regulator_disable(res->vdda_refclk);
  218. }
  219. static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
  220. {
  221. struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
  222. struct dw_pcie *pci = pcie->pci;
  223. struct device *dev = pci->dev;
  224. u32 val;
  225. int ret;
  226. ret = regulator_enable(res->vdda);
  227. if (ret) {
  228. dev_err(dev, "cannot enable vdda regulator\n");
  229. return ret;
  230. }
  231. ret = regulator_enable(res->vdda_refclk);
  232. if (ret) {
  233. dev_err(dev, "cannot enable vdda_refclk regulator\n");
  234. goto err_refclk;
  235. }
  236. ret = regulator_enable(res->vdda_phy);
  237. if (ret) {
  238. dev_err(dev, "cannot enable vdda_phy regulator\n");
  239. goto err_vdda_phy;
  240. }
  241. ret = reset_control_assert(res->ahb_reset);
  242. if (ret) {
  243. dev_err(dev, "cannot assert ahb reset\n");
  244. goto err_assert_ahb;
  245. }
  246. ret = clk_prepare_enable(res->iface_clk);
  247. if (ret) {
  248. dev_err(dev, "cannot prepare/enable iface clock\n");
  249. goto err_assert_ahb;
  250. }
  251. ret = clk_prepare_enable(res->phy_clk);
  252. if (ret) {
  253. dev_err(dev, "cannot prepare/enable phy clock\n");
  254. goto err_clk_phy;
  255. }
  256. ret = clk_prepare_enable(res->core_clk);
  257. if (ret) {
  258. dev_err(dev, "cannot prepare/enable core clock\n");
  259. goto err_clk_core;
  260. }
  261. ret = reset_control_deassert(res->ahb_reset);
  262. if (ret) {
  263. dev_err(dev, "cannot deassert ahb reset\n");
  264. goto err_deassert_ahb;
  265. }
  266. /* enable PCIe clocks and resets */
  267. val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
  268. val &= ~BIT(0);
  269. writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
  270. /* enable external reference clock */
  271. val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
  272. val |= BIT(16);
  273. writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
  274. ret = reset_control_deassert(res->phy_reset);
  275. if (ret) {
  276. dev_err(dev, "cannot deassert phy reset\n");
  277. return ret;
  278. }
  279. ret = reset_control_deassert(res->pci_reset);
  280. if (ret) {
  281. dev_err(dev, "cannot deassert pci reset\n");
  282. return ret;
  283. }
  284. ret = reset_control_deassert(res->por_reset);
  285. if (ret) {
  286. dev_err(dev, "cannot deassert por reset\n");
  287. return ret;
  288. }
  289. ret = reset_control_deassert(res->axi_reset);
  290. if (ret) {
  291. dev_err(dev, "cannot deassert axi reset\n");
  292. return ret;
  293. }
  294. /* wait for clock acquisition */
  295. usleep_range(1000, 1500);
  296. return 0;
  297. err_deassert_ahb:
  298. clk_disable_unprepare(res->core_clk);
  299. err_clk_core:
  300. clk_disable_unprepare(res->phy_clk);
  301. err_clk_phy:
  302. clk_disable_unprepare(res->iface_clk);
  303. err_assert_ahb:
  304. regulator_disable(res->vdda_phy);
  305. err_vdda_phy:
  306. regulator_disable(res->vdda_refclk);
  307. err_refclk:
  308. regulator_disable(res->vdda);
  309. return ret;
  310. }
/* Undo qcom_pcie_init_v1(): reassert the core reset, stop clocks, cut power. */
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
/*
 * Power up a v1 controller: release the core reset, enable clocks and the
 * vdda regulator, then program the PARF registers. Each failure path
 * unwinds everything acquired before it.
 *
 * NOTE(review): vdda is enabled after the clocks here, while deinit_v1
 * disables it last — verify the intended power sequencing.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* BIT(31): un-halt AXI master writes so MSIs can be delivered */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
/*
 * Acquire the v2 clocks (all devm-managed). The pipe clock is only
 * enabled later, in qcom_pcie_post_init_v2(), after PHY power-on.
 */
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
/*
 * Power up a v2 controller: enable the bus clocks and program the PARF
 * registers. The register-write sequence is order-sensitive; each clock
 * failure path unwinds the clocks enabled before it.
 */
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* BIT(4) in MHI_CLOCK_RESET_CTRL — purpose not evident here; TODO confirm */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	/* BIT(31): un-halt AXI master writes (v2 register location) */
	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}
  449. static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
  450. {
  451. struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
  452. struct dw_pcie *pci = pcie->pci;
  453. struct device *dev = pci->dev;
  454. int ret;
  455. ret = clk_prepare_enable(res->pipe_clk);
  456. if (ret) {
  457. dev_err(dev, "cannot prepare/enable pipe clock\n");
  458. return ret;
  459. }
  460. return 0;
  461. }
  462. static int qcom_pcie_link_up(struct dw_pcie *pci)
  463. {
  464. u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
  465. return !!(val & PCI_EXP_LNKSTA_DLLLA);
  466. }
/* Undo init_v2() + post_init_v2(): stop all v2 clocks, pipe clock first. */
static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

	clk_disable_unprepare(res->pipe_clk);
	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}
  476. static void qcom_pcie_host_init(struct pcie_port *pp)
  477. {
  478. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  479. struct qcom_pcie *pcie = to_qcom_pcie(pci);
  480. int ret;
  481. qcom_ep_reset_assert(pcie);
  482. ret = pcie->ops->init(pcie);
  483. if (ret)
  484. goto err_deinit;
  485. ret = phy_power_on(pcie->phy);
  486. if (ret)
  487. goto err_deinit;
  488. if (pcie->ops->post_init)
  489. pcie->ops->post_init(pcie);
  490. dw_pcie_setup_rc(pp);
  491. if (IS_ENABLED(CONFIG_PCI_MSI))
  492. dw_pcie_msi_init(pp);
  493. qcom_ep_reset_deassert(pcie);
  494. ret = qcom_pcie_establish_link(pcie);
  495. if (ret)
  496. goto err;
  497. return;
  498. err:
  499. qcom_ep_reset_assert(pcie);
  500. phy_power_off(pcie->phy);
  501. err_deinit:
  502. pcie->ops->deinit(pcie);
  503. }
  504. static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  505. u32 *val)
  506. {
  507. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  508. /* the device class is not reported correctly from the register */
  509. if (where == PCI_CLASS_REVISION && size == 4) {
  510. *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
  511. *val &= 0xff; /* keep revision id */
  512. *val |= PCI_CLASS_BRIDGE_PCI << 16;
  513. return PCIBIOS_SUCCESSFUL;
  514. }
  515. return dw_pcie_read(pci->dbi_base + where, size, val);
  516. }
/* DesignWare host callbacks common to all variants */
static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Per-generation hook tables, selected via the OF match data below. */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* only v2 needs a post-PHY-power-on step (pipe clock) */
static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
/* Core DesignWare callbacks */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
  543. static int qcom_pcie_probe(struct platform_device *pdev)
  544. {
  545. struct device *dev = &pdev->dev;
  546. struct resource *res;
  547. struct pcie_port *pp;
  548. struct dw_pcie *pci;
  549. struct qcom_pcie *pcie;
  550. int ret;
  551. pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
  552. if (!pcie)
  553. return -ENOMEM;
  554. pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
  555. if (!pci)
  556. return -ENOMEM;
  557. pci->dev = dev;
  558. pci->ops = &dw_pcie_ops;
  559. pp = &pci->pp;
  560. pcie->pci = pci;
  561. pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
  562. pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
  563. if (IS_ERR(pcie->reset))
  564. return PTR_ERR(pcie->reset);
  565. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
  566. pcie->parf = devm_ioremap_resource(dev, res);
  567. if (IS_ERR(pcie->parf))
  568. return PTR_ERR(pcie->parf);
  569. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
  570. pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
  571. if (IS_ERR(pci->dbi_base))
  572. return PTR_ERR(pci->dbi_base);
  573. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
  574. pcie->elbi = devm_ioremap_resource(dev, res);
  575. if (IS_ERR(pcie->elbi))
  576. return PTR_ERR(pcie->elbi);
  577. pcie->phy = devm_phy_optional_get(dev, "pciephy");
  578. if (IS_ERR(pcie->phy))
  579. return PTR_ERR(pcie->phy);
  580. ret = pcie->ops->get_resources(pcie);
  581. if (ret)
  582. return ret;
  583. pp->root_bus_nr = -1;
  584. pp->ops = &qcom_pcie_dw_ops;
  585. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  586. pp->msi_irq = platform_get_irq_byname(pdev, "msi");
  587. if (pp->msi_irq < 0)
  588. return pp->msi_irq;
  589. ret = devm_request_irq(dev, pp->msi_irq,
  590. qcom_pcie_msi_irq_handler,
  591. IRQF_SHARED, "qcom-pcie-msi", pp);
  592. if (ret) {
  593. dev_err(dev, "cannot request msi irq\n");
  594. return ret;
  595. }
  596. }
  597. ret = phy_init(pcie->phy);
  598. if (ret)
  599. return ret;
  600. platform_set_drvdata(pdev, pcie);
  601. ret = dw_pcie_host_init(pp);
  602. if (ret) {
  603. dev_err(dev, "cannot initialize host\n");
  604. return ret;
  605. }
  606. return 0;
  607. }
/* DT compatibles mapped to the per-generation ops tables */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};
/* Built-in only (no remove callback); unbinding via sysfs is suppressed. */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);