pcie-qcom.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753
  1. /*
  2. * Qualcomm PCIe root complex driver
  3. *
  4. * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  5. * Copyright 2015 Linaro Limited.
  6. *
  7. * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 and
  11. * only version 2 as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. */
  18. #include <linux/clk.h>
  19. #include <linux/delay.h>
  20. #include <linux/gpio.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/io.h>
  23. #include <linux/iopoll.h>
  24. #include <linux/kernel.h>
  25. #include <linux/init.h>
  26. #include <linux/of_device.h>
  27. #include <linux/of_gpio.h>
  28. #include <linux/pci.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/phy/phy.h>
  31. #include <linux/regulator/consumer.h>
  32. #include <linux/reset.h>
  33. #include <linux/slab.h>
  34. #include <linux/types.h>
  35. #include "pcie-designware.h"
/* PARF (PCIe "wrapper" control block) register offsets */
#define PCIE20_PARF_SYS_CTRL			0x00
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8	/* v2 cores moved this register */
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)	/* start link training */

/* offset of the PCIe capability structure in config space */
#define PCIE20_CAP				0x70

/* hold time around PERST# transitions, in microseconds */
#define PERST_DELAY_US				1000
/* clocks, resets and supplies used by v0 cores (IPQ8064/APQ8064) */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};
/* clocks, reset and supply used by v1 cores (APQ8084) */
struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
/* clocks used by v2 cores (MSM8996); no resets or regulators needed here */
struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;	/* enabled late, in post_init, after the PHY is up */
};
/* only one variant is active per device, selected by the matched ops */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};
struct qcom_pcie;

/* per-IP-revision hooks, picked via the of_device_id match data */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* devm lookups only */
	int (*init)(struct qcom_pcie *pcie);		/* power up the core */
	int (*post_init)(struct qcom_pcie *pcie);	/* optional, after PHY power-on */
	void (*deinit)(struct qcom_pcie *pcie);		/* undo init */
	void (*ltssm_enable)(struct qcom_pcie *pcie);	/* kick link training */
};
struct qcom_pcie {
	struct pcie_port pp;			/* pp.dbi_base is DT "dbi" */
	void __iomem *parf;			/* DT "parf" */
	void __iomem *elbi;			/* DT "elbi" */
	union qcom_pcie_resources res;
	struct phy *phy;			/* optional "pciephy" PHY */
	struct gpio_desc *reset;		/* optional PERST# GPIO (active high here) */
	struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		container_of(x, struct qcom_pcie, pp)
/* Put the endpoint into reset via PERST# and give it time to settle. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
/* Release the endpoint from reset and give it time to come up. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
  112. static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
  113. {
  114. struct pcie_port *pp = arg;
  115. return dw_handle_msi_irq(pp);
  116. }
  117. static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
  118. {
  119. u32 val;
  120. /* enable link training */
  121. val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  122. val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
  123. writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  124. }
  125. static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
  126. {
  127. u32 val;
  128. /* enable link training */
  129. val = readl(pcie->parf + PCIE20_PARF_LTSSM);
  130. val |= BIT(8);
  131. writel(val, pcie->parf + PCIE20_PARF_LTSSM);
  132. }
  133. static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
  134. {
  135. if (dw_pcie_link_up(&pcie->pp))
  136. return 0;
  137. /* Enable Link Training state machine */
  138. if (pcie->ops->ltssm_enable)
  139. pcie->ops->ltssm_enable(pcie);
  140. return dw_pcie_wait_for_link(&pcie->pp);
  141. }
/*
 * Look up the regulators, clocks and resets needed by a v0 core.
 * Everything is devm-managed, so error paths need no cleanup; the first
 * failing lookup's error code (possibly -EPROBE_DEFER) is returned.
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	return 0;
}
/*
 * Look up the regulator, clocks and reset needed by a v1 core.
 * Everything is devm-managed, so error paths need no cleanup.
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	if (IS_ERR(res->core))
		return PTR_ERR(res->core);

	return 0;
}
  205. static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
  206. {
  207. struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
  208. reset_control_assert(res->pci_reset);
  209. reset_control_assert(res->axi_reset);
  210. reset_control_assert(res->ahb_reset);
  211. reset_control_assert(res->por_reset);
  212. reset_control_assert(res->pci_reset);
  213. clk_disable_unprepare(res->iface_clk);
  214. clk_disable_unprepare(res->core_clk);
  215. clk_disable_unprepare(res->phy_clk);
  216. regulator_disable(res->vdda);
  217. regulator_disable(res->vdda_phy);
  218. regulator_disable(res->vdda_refclk);
  219. }
  220. static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
  221. {
  222. struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
  223. struct device *dev = pcie->pp.dev;
  224. u32 val;
  225. int ret;
  226. ret = regulator_enable(res->vdda);
  227. if (ret) {
  228. dev_err(dev, "cannot enable vdda regulator\n");
  229. return ret;
  230. }
  231. ret = regulator_enable(res->vdda_refclk);
  232. if (ret) {
  233. dev_err(dev, "cannot enable vdda_refclk regulator\n");
  234. goto err_refclk;
  235. }
  236. ret = regulator_enable(res->vdda_phy);
  237. if (ret) {
  238. dev_err(dev, "cannot enable vdda_phy regulator\n");
  239. goto err_vdda_phy;
  240. }
  241. ret = reset_control_assert(res->ahb_reset);
  242. if (ret) {
  243. dev_err(dev, "cannot assert ahb reset\n");
  244. goto err_assert_ahb;
  245. }
  246. ret = clk_prepare_enable(res->iface_clk);
  247. if (ret) {
  248. dev_err(dev, "cannot prepare/enable iface clock\n");
  249. goto err_assert_ahb;
  250. }
  251. ret = clk_prepare_enable(res->phy_clk);
  252. if (ret) {
  253. dev_err(dev, "cannot prepare/enable phy clock\n");
  254. goto err_clk_phy;
  255. }
  256. ret = clk_prepare_enable(res->core_clk);
  257. if (ret) {
  258. dev_err(dev, "cannot prepare/enable core clock\n");
  259. goto err_clk_core;
  260. }
  261. ret = reset_control_deassert(res->ahb_reset);
  262. if (ret) {
  263. dev_err(dev, "cannot deassert ahb reset\n");
  264. goto err_deassert_ahb;
  265. }
  266. /* enable PCIe clocks and resets */
  267. val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
  268. val &= ~BIT(0);
  269. writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
  270. /* enable external reference clock */
  271. val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
  272. val |= BIT(16);
  273. writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
  274. ret = reset_control_deassert(res->phy_reset);
  275. if (ret) {
  276. dev_err(dev, "cannot deassert phy reset\n");
  277. return ret;
  278. }
  279. ret = reset_control_deassert(res->pci_reset);
  280. if (ret) {
  281. dev_err(dev, "cannot deassert pci reset\n");
  282. return ret;
  283. }
  284. ret = reset_control_deassert(res->por_reset);
  285. if (ret) {
  286. dev_err(dev, "cannot deassert por reset\n");
  287. return ret;
  288. }
  289. ret = reset_control_deassert(res->axi_reset);
  290. if (ret) {
  291. dev_err(dev, "cannot deassert axi reset\n");
  292. return ret;
  293. }
  294. /* wait for clock acquisition */
  295. usleep_range(1000, 1500);
  296. return 0;
  297. err_deassert_ahb:
  298. clk_disable_unprepare(res->core_clk);
  299. err_clk_core:
  300. clk_disable_unprepare(res->phy_clk);
  301. err_clk_phy:
  302. clk_disable_unprepare(res->iface_clk);
  303. err_assert_ahb:
  304. regulator_disable(res->vdda_phy);
  305. err_vdda_phy:
  306. regulator_disable(res->vdda_refclk);
  307. err_refclk:
  308. regulator_disable(res->vdda);
  309. return ret;
  310. }
/*
 * Power down a v1 core: assert the core reset, gate the clocks and
 * drop the supply enabled by qcom_pcie_init_v1() (reverse order).
 */
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
/*
 * Power up a v1 core: release the core reset, enable the bus clocks and
 * the vdda supply, then point DBI accesses at offset 0 and, with MSI,
 * un-halt AXI master writes. Unwinds fully on any failure.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* bit 31 presumably disables the AXI master write halt so
		 * MSI memory writes go through — TODO confirm against HW docs */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
/*
 * Look up the clocks needed by a v2 core. All devm-managed; no cleanup
 * needed on error.
 */
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	if (IS_ERR(res->pipe_clk))
		return PTR_ERR(res->pipe_clk);

	return 0;
}
/*
 * Power up a v2 core: enable the bus/cfg clocks, take the PHY out of
 * power-down, and program the PARF control registers. The pipe clock is
 * NOT enabled here — it needs the PHY powered on first, so it is handled
 * in qcom_pcie_post_init_v2(). Unwinds the clocks on any failure (each
 * err_* label is named after the clock whose enable failed).
 */
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* take the PHY out of power-down (clear bit 0) */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* NOTE(review): bit meanings below inferred from register names —
	 * confirm against the MSM8996 HW manual */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}
  448. static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
  449. {
  450. struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
  451. struct device *dev = pcie->pp.dev;
  452. int ret;
  453. ret = clk_prepare_enable(res->pipe_clk);
  454. if (ret) {
  455. dev_err(dev, "cannot prepare/enable pipe clock\n");
  456. return ret;
  457. }
  458. return 0;
  459. }
  460. static int qcom_pcie_link_up(struct pcie_port *pp)
  461. {
  462. struct qcom_pcie *pcie = to_qcom_pcie(pp);
  463. u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
  464. return !!(val & PCI_EXP_LNKSTA_DLLLA);
  465. }
/*
 * Power down a v2 core: gate all clocks in the reverse of the order
 * they were enabled (pipe last on, first off).
 */
static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

	clk_disable_unprepare(res->pipe_clk);
	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}
  475. static void qcom_pcie_host_init(struct pcie_port *pp)
  476. {
  477. struct qcom_pcie *pcie = to_qcom_pcie(pp);
  478. int ret;
  479. qcom_ep_reset_assert(pcie);
  480. ret = pcie->ops->init(pcie);
  481. if (ret)
  482. goto err_deinit;
  483. ret = phy_power_on(pcie->phy);
  484. if (ret)
  485. goto err_deinit;
  486. if (pcie->ops->post_init)
  487. pcie->ops->post_init(pcie);
  488. dw_pcie_setup_rc(pp);
  489. if (IS_ENABLED(CONFIG_PCI_MSI))
  490. dw_pcie_msi_init(pp);
  491. qcom_ep_reset_deassert(pcie);
  492. ret = qcom_pcie_establish_link(pcie);
  493. if (ret)
  494. goto err;
  495. return;
  496. err:
  497. qcom_ep_reset_assert(pcie);
  498. phy_power_off(pcie->phy);
  499. err_deinit:
  500. pcie->ops->deinit(pcie);
  501. }
  502. static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  503. u32 *val)
  504. {
  505. /* the device class is not reported correctly from the register */
  506. if (where == PCI_CLASS_REVISION && size == 4) {
  507. *val = readl(pp->dbi_base + PCI_CLASS_REVISION);
  508. *val &= 0xff; /* keep revision id */
  509. *val |= PCI_CLASS_BRIDGE_PCI << 16;
  510. return PCIBIOS_SUCCESSFUL;
  511. }
  512. return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
  513. }
/* hooks handed to the DesignWare PCIe core */
static struct pcie_host_ops qcom_pcie_dw_ops = {
	.link_up = qcom_pcie_link_up,
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};
/* IPQ8064/APQ8064 */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* APQ8084 */
static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* MSM8996; only variant with a post_init step (pipe clock) */
static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
  538. static int qcom_pcie_probe(struct platform_device *pdev)
  539. {
  540. struct device *dev = &pdev->dev;
  541. struct resource *res;
  542. struct qcom_pcie *pcie;
  543. struct pcie_port *pp;
  544. int ret;
  545. pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
  546. if (!pcie)
  547. return -ENOMEM;
  548. pp = &pcie->pp;
  549. pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
  550. pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
  551. if (IS_ERR(pcie->reset))
  552. return PTR_ERR(pcie->reset);
  553. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
  554. pcie->parf = devm_ioremap_resource(dev, res);
  555. if (IS_ERR(pcie->parf))
  556. return PTR_ERR(pcie->parf);
  557. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
  558. pp->dbi_base = devm_ioremap_resource(dev, res);
  559. if (IS_ERR(pp->dbi_base))
  560. return PTR_ERR(pp->dbi_base);
  561. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
  562. pcie->elbi = devm_ioremap_resource(dev, res);
  563. if (IS_ERR(pcie->elbi))
  564. return PTR_ERR(pcie->elbi);
  565. pcie->phy = devm_phy_optional_get(dev, "pciephy");
  566. if (IS_ERR(pcie->phy))
  567. return PTR_ERR(pcie->phy);
  568. pp->dev = dev;
  569. ret = pcie->ops->get_resources(pcie);
  570. if (ret)
  571. return ret;
  572. pp->root_bus_nr = -1;
  573. pp->ops = &qcom_pcie_dw_ops;
  574. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  575. pp->msi_irq = platform_get_irq_byname(pdev, "msi");
  576. if (pp->msi_irq < 0)
  577. return pp->msi_irq;
  578. ret = devm_request_irq(dev, pp->msi_irq,
  579. qcom_pcie_msi_irq_handler,
  580. IRQF_SHARED, "qcom-pcie-msi", pp);
  581. if (ret) {
  582. dev_err(dev, "cannot request msi irq\n");
  583. return ret;
  584. }
  585. }
  586. ret = phy_init(pcie->phy);
  587. if (ret)
  588. return ret;
  589. ret = dw_pcie_host_init(pp);
  590. if (ret) {
  591. dev_err(dev, "cannot initialize host\n");
  592. return ret;
  593. }
  594. return 0;
  595. }
/* DT match table; .data selects the per-revision ops */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		/* no .remove: unbinding would not tear the host down cleanly */
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
/* built-in only (no module_exit), consistent with suppress_bind_attrs */
builtin_platform_driver(qcom_pcie_driver);