pcie-qcom.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Qualcomm PCIe root complex driver
  4. *
  5. * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  6. * Copyright 2015 Linaro Limited.
  7. *
  8. * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
  9. */
  10. #include <linux/clk.h>
  11. #include <linux/delay.h>
  12. #include <linux/gpio/consumer.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/iopoll.h>
  16. #include <linux/kernel.h>
  17. #include <linux/init.h>
  18. #include <linux/of_device.h>
  19. #include <linux/of_gpio.h>
  20. #include <linux/pci.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/phy/phy.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/reset.h>
  26. #include <linux/slab.h>
  27. #include <linux/types.h>
  28. #include "pcie-designware.h"
/* PARF ("PCIe wrapper") register offsets and bit definitions */
#define PCIE20_PARF_SYS_CTRL 0x00
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
#define SLV_ACLK_CGC_DIS BIT(9)
#define CORE_CLK_CGC_DIS BIT(6)
#define AUX_PWR_DET BIT(4)
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)

/* DBI (configuration space) register offsets */
#define PCIE20_COMMAND_STATUS 0x04
#define CMD_BME_VAL 0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10

#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C

/* ELBI (external local bus interface) registers */
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)

#define PCIE20_CAP 0x70
#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH 0x1AC

#define PCIE20_MISC_CONTROL_1_REG 0x8BC
#define DBI_RO_WR_EN 1

/* PERST# settle time, in microseconds */
#define PERST_DELAY_US 1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000

#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
/*
 * Clocks, resets and supplies used by the v2.1.0 IP revision.
 * The three supplies are "vdda", "vdda_phy" and "vdda_refclk"
 * (see qcom_pcie_get_resources_2_1_0()).
 */
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
/* Clocks, core reset and "vdda" supply used by the v1.0.0 IP revision */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
/*
 * Clocks and supplies used by the v2.3.2 IP revision.  The supplies are
 * "vdda" and "vddpe-3v3" (see qcom_pcie_get_resources_2_3_2()); pipe_clk
 * is handled separately in the post_init/post_deinit hooks.
 */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
/* Clocks and the large set of reset lines used by the v2.4.0 IP revision */
struct qcom_pcie_resources_2_4_0 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};
/*
 * Clocks and resets used by the v2.3.3 IP revision.  rst[] holds the seven
 * named reset lines listed in qcom_pcie_get_resources_2_3_3().
 */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};
/* Only one IP revision is active per controller, so overlay the sets */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};
struct qcom_pcie;

/*
 * Per-IP-revision callbacks: get_resources/init/post_init bring the
 * controller up, deinit/post_deinit tear it down, and the optional
 * ltssm_enable starts link training (see qcom_pcie_establish_link()).
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};
/* Per-controller driver state */
struct qcom_pcie {
	struct dw_pcie *pci;		/* DesignWare PCIe core handle */
	void __iomem *parf;		/* DT parf */
	void __iomem *elbi;		/* DT elbi */
	union qcom_pcie_resources res;	/* revision-specific clocks/resets */
	struct phy *phy;
	struct gpio_desc *reset;	/* endpoint reset GPIO (active high here) */
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
/* Drive the endpoint reset GPIO active and give the line time to settle. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
/* Release the endpoint reset GPIO and give the endpoint time to come up. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
  160. static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
  161. {
  162. struct dw_pcie *pci = pcie->pci;
  163. if (dw_pcie_link_up(pci))
  164. return 0;
  165. /* Enable Link Training state machine */
  166. if (pcie->ops->ltssm_enable)
  167. pcie->ops->ltssm_enable(pcie);
  168. return dw_pcie_wait_for_link(pci);
  169. }
  170. static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
  171. {
  172. u32 val;
  173. /* enable link training */
  174. val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  175. val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
  176. writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
  177. }
  178. static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
  179. {
  180. struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
  181. struct dw_pcie *pci = pcie->pci;
  182. struct device *dev = pci->dev;
  183. int ret;
  184. res->supplies[0].supply = "vdda";
  185. res->supplies[1].supply = "vdda_phy";
  186. res->supplies[2].supply = "vdda_refclk";
  187. ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
  188. res->supplies);
  189. if (ret)
  190. return ret;
  191. res->iface_clk = devm_clk_get(dev, "iface");
  192. if (IS_ERR(res->iface_clk))
  193. return PTR_ERR(res->iface_clk);
  194. res->core_clk = devm_clk_get(dev, "core");
  195. if (IS_ERR(res->core_clk))
  196. return PTR_ERR(res->core_clk);
  197. res->phy_clk = devm_clk_get(dev, "phy");
  198. if (IS_ERR(res->phy_clk))
  199. return PTR_ERR(res->phy_clk);
  200. res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
  201. if (IS_ERR(res->pci_reset))
  202. return PTR_ERR(res->pci_reset);
  203. res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
  204. if (IS_ERR(res->axi_reset))
  205. return PTR_ERR(res->axi_reset);
  206. res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
  207. if (IS_ERR(res->ahb_reset))
  208. return PTR_ERR(res->ahb_reset);
  209. res->por_reset = devm_reset_control_get_exclusive(dev, "por");
  210. if (IS_ERR(res->por_reset))
  211. return PTR_ERR(res->por_reset);
  212. res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
  213. return PTR_ERR_OR_ZERO(res->phy_reset);
  214. }
  215. static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
  216. {
  217. struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
  218. reset_control_assert(res->pci_reset);
  219. reset_control_assert(res->axi_reset);
  220. reset_control_assert(res->ahb_reset);
  221. reset_control_assert(res->por_reset);
  222. reset_control_assert(res->pci_reset);
  223. clk_disable_unprepare(res->iface_clk);
  224. clk_disable_unprepare(res->core_clk);
  225. clk_disable_unprepare(res->phy_clk);
  226. regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
  227. }
  228. static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
  229. {
  230. struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
  231. struct dw_pcie *pci = pcie->pci;
  232. struct device *dev = pci->dev;
  233. u32 val;
  234. int ret;
  235. ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
  236. if (ret < 0) {
  237. dev_err(dev, "cannot enable regulators\n");
  238. return ret;
  239. }
  240. ret = reset_control_assert(res->ahb_reset);
  241. if (ret) {
  242. dev_err(dev, "cannot assert ahb reset\n");
  243. goto err_assert_ahb;
  244. }
  245. ret = clk_prepare_enable(res->iface_clk);
  246. if (ret) {
  247. dev_err(dev, "cannot prepare/enable iface clock\n");
  248. goto err_assert_ahb;
  249. }
  250. ret = clk_prepare_enable(res->phy_clk);
  251. if (ret) {
  252. dev_err(dev, "cannot prepare/enable phy clock\n");
  253. goto err_clk_phy;
  254. }
  255. ret = clk_prepare_enable(res->core_clk);
  256. if (ret) {
  257. dev_err(dev, "cannot prepare/enable core clock\n");
  258. goto err_clk_core;
  259. }
  260. ret = reset_control_deassert(res->ahb_reset);
  261. if (ret) {
  262. dev_err(dev, "cannot deassert ahb reset\n");
  263. goto err_deassert_ahb;
  264. }
  265. /* enable PCIe clocks and resets */
  266. val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
  267. val &= ~BIT(0);
  268. writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
  269. /* enable external reference clock */
  270. val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
  271. val |= BIT(16);
  272. writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
  273. ret = reset_control_deassert(res->phy_reset);
  274. if (ret) {
  275. dev_err(dev, "cannot deassert phy reset\n");
  276. return ret;
  277. }
  278. ret = reset_control_deassert(res->pci_reset);
  279. if (ret) {
  280. dev_err(dev, "cannot deassert pci reset\n");
  281. return ret;
  282. }
  283. ret = reset_control_deassert(res->por_reset);
  284. if (ret) {
  285. dev_err(dev, "cannot deassert por reset\n");
  286. return ret;
  287. }
  288. ret = reset_control_deassert(res->axi_reset);
  289. if (ret) {
  290. dev_err(dev, "cannot deassert axi reset\n");
  291. return ret;
  292. }
  293. /* wait for clock acquisition */
  294. usleep_range(1000, 1500);
  295. /* Set the Max TLP size to 2K, instead of using default of 4K */
  296. writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
  297. pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
  298. writel(CFG_BRIDGE_SB_INIT,
  299. pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
  300. return 0;
  301. err_deassert_ahb:
  302. clk_disable_unprepare(res->core_clk);
  303. err_clk_core:
  304. clk_disable_unprepare(res->phy_clk);
  305. err_clk_phy:
  306. clk_disable_unprepare(res->iface_clk);
  307. err_assert_ahb:
  308. regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
  309. return ret;
  310. }
  311. static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
  312. {
  313. struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
  314. struct dw_pcie *pci = pcie->pci;
  315. struct device *dev = pci->dev;
  316. res->vdda = devm_regulator_get(dev, "vdda");
  317. if (IS_ERR(res->vdda))
  318. return PTR_ERR(res->vdda);
  319. res->iface = devm_clk_get(dev, "iface");
  320. if (IS_ERR(res->iface))
  321. return PTR_ERR(res->iface);
  322. res->aux = devm_clk_get(dev, "aux");
  323. if (IS_ERR(res->aux))
  324. return PTR_ERR(res->aux);
  325. res->master_bus = devm_clk_get(dev, "master_bus");
  326. if (IS_ERR(res->master_bus))
  327. return PTR_ERR(res->master_bus);
  328. res->slave_bus = devm_clk_get(dev, "slave_bus");
  329. if (IS_ERR(res->slave_bus))
  330. return PTR_ERR(res->slave_bus);
  331. res->core = devm_reset_control_get_exclusive(dev, "core");
  332. return PTR_ERR_OR_ZERO(res->core);
  333. }
/*
 * Power down the v1.0.0 core: assert the core reset, stop the clocks
 * and disable the supply.  Reverse order of qcom_pcie_init_1_0_0().
 */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
/*
 * Power up the v1.0.0 core: release the core reset, enable the clocks
 * and the "vdda" supply, then point the DBI window and (with MSI) set
 * the AXI master write-address halt bit.  On failure everything enabled
 * so far is rolled back via the goto chain.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		/* BIT(31) halts AXI master writes (needed for MSI handling) */
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
  400. static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
  401. {
  402. u32 val;
  403. /* enable link training */
  404. val = readl(pcie->parf + PCIE20_PARF_LTSSM);
  405. val |= BIT(8);
  406. writel(val, pcie->parf + PCIE20_PARF_LTSSM);
  407. }
  408. static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
  409. {
  410. struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
  411. struct dw_pcie *pci = pcie->pci;
  412. struct device *dev = pci->dev;
  413. int ret;
  414. res->supplies[0].supply = "vdda";
  415. res->supplies[1].supply = "vddpe-3v3";
  416. ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
  417. res->supplies);
  418. if (ret)
  419. return ret;
  420. res->aux_clk = devm_clk_get(dev, "aux");
  421. if (IS_ERR(res->aux_clk))
  422. return PTR_ERR(res->aux_clk);
  423. res->cfg_clk = devm_clk_get(dev, "cfg");
  424. if (IS_ERR(res->cfg_clk))
  425. return PTR_ERR(res->cfg_clk);
  426. res->master_clk = devm_clk_get(dev, "bus_master");
  427. if (IS_ERR(res->master_clk))
  428. return PTR_ERR(res->master_clk);
  429. res->slave_clk = devm_clk_get(dev, "bus_slave");
  430. if (IS_ERR(res->slave_clk))
  431. return PTR_ERR(res->slave_clk);
  432. res->pipe_clk = devm_clk_get(dev, "pipe");
  433. return PTR_ERR_OR_ZERO(res->pipe_clk);
  434. }
/*
 * Power down the v2.3.2 core: stop the clocks (pipe_clk is handled in
 * post_deinit) and disable the supplies.
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
/* Stop the pipe clock after the PHY has been powered off. */
static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}
/*
 * Power up the v2.3.2 core: enable supplies and clocks, then program the
 * PARF block (PHY control, DBI window, MHI clock/reset, AXI write halt).
 * pipe_clk is enabled later, in qcom_pcie_post_init_2_3_2().  On failure
 * everything enabled so far is rolled back via the goto chain.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
  508. static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
  509. {
  510. struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
  511. struct dw_pcie *pci = pcie->pci;
  512. struct device *dev = pci->dev;
  513. int ret;
  514. ret = clk_prepare_enable(res->pipe_clk);
  515. if (ret) {
  516. dev_err(dev, "cannot prepare/enable pipe clock\n");
  517. return ret;
  518. }
  519. return 0;
  520. }
/*
 * Look up the three clocks and twelve reset lines needed by the v2.4.0
 * core.  All devm-managed; returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
								 "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
								"axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}
/*
 * Power down the v2.4.0 core: put the blocks back into reset and stop
 * the clocks.  Mirrors qcom_pcie_init_2_4_0().
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}
/*
 * Power up the v2.4.0 core.  The sequence is: assert all resets (with
 * settle delays), deassert them in the required order, enable the
 * clocks, then program the PARF block.  On any failure, the goto chain
 * re-asserts exactly the resets deasserted (and disables the clocks
 * enabled) up to that point.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	/* First, park everything in reset */
	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	/* Now release the resets in bring-up order */
	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_aux;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_axi_s;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

	/* Unwind: each label undoes the step that succeeded just before it */
err_clk_axi_s:
	clk_disable_unprepare(res->master_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->aux_clk);
err_clk_aux:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);

	return ret;
}
  751. static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
  752. {
  753. struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
  754. struct dw_pcie *pci = pcie->pci;
  755. struct device *dev = pci->dev;
  756. int i;
  757. const char *rst_names[] = { "axi_m", "axi_s", "pipe",
  758. "axi_m_sticky", "sticky",
  759. "ahb", "sleep", };
  760. res->iface = devm_clk_get(dev, "iface");
  761. if (IS_ERR(res->iface))
  762. return PTR_ERR(res->iface);
  763. res->axi_m_clk = devm_clk_get(dev, "axi_m");
  764. if (IS_ERR(res->axi_m_clk))
  765. return PTR_ERR(res->axi_m_clk);
  766. res->axi_s_clk = devm_clk_get(dev, "axi_s");
  767. if (IS_ERR(res->axi_s_clk))
  768. return PTR_ERR(res->axi_s_clk);
  769. res->ahb_clk = devm_clk_get(dev, "ahb");
  770. if (IS_ERR(res->ahb_clk))
  771. return PTR_ERR(res->ahb_clk);
  772. res->aux_clk = devm_clk_get(dev, "aux");
  773. if (IS_ERR(res->aux_clk))
  774. return PTR_ERR(res->aux_clk);
  775. for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
  776. res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
  777. if (IS_ERR(res->rst[i]))
  778. return PTR_ERR(res->rst[i]);
  779. }
  780. return 0;
  781. }
  782. static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
  783. {
  784. struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
  785. clk_disable_unprepare(res->iface);
  786. clk_disable_unprepare(res->axi_m_clk);
  787. clk_disable_unprepare(res->axi_s_clk);
  788. clk_disable_unprepare(res->ahb_clk);
  789. clk_disable_unprepare(res->aux_clk);
  790. }
  791. static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
  792. {
  793. struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
  794. struct dw_pcie *pci = pcie->pci;
  795. struct device *dev = pci->dev;
  796. int i, ret;
  797. u32 val;
  798. for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
  799. ret = reset_control_assert(res->rst[i]);
  800. if (ret) {
  801. dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
  802. return ret;
  803. }
  804. }
  805. usleep_range(2000, 2500);
  806. for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
  807. ret = reset_control_deassert(res->rst[i]);
  808. if (ret) {
  809. dev_err(dev, "reset #%d deassert failed (%d)\n", i,
  810. ret);
  811. return ret;
  812. }
  813. }
  814. /*
  815. * Don't have a way to see if the reset has completed.
  816. * Wait for some time.
  817. */
  818. usleep_range(2000, 2500);
  819. ret = clk_prepare_enable(res->iface);
  820. if (ret) {
  821. dev_err(dev, "cannot prepare/enable core clock\n");
  822. goto err_clk_iface;
  823. }
  824. ret = clk_prepare_enable(res->axi_m_clk);
  825. if (ret) {
  826. dev_err(dev, "cannot prepare/enable core clock\n");
  827. goto err_clk_axi_m;
  828. }
  829. ret = clk_prepare_enable(res->axi_s_clk);
  830. if (ret) {
  831. dev_err(dev, "cannot prepare/enable axi slave clock\n");
  832. goto err_clk_axi_s;
  833. }
  834. ret = clk_prepare_enable(res->ahb_clk);
  835. if (ret) {
  836. dev_err(dev, "cannot prepare/enable ahb clock\n");
  837. goto err_clk_ahb;
  838. }
  839. ret = clk_prepare_enable(res->aux_clk);
  840. if (ret) {
  841. dev_err(dev, "cannot prepare/enable aux clock\n");
  842. goto err_clk_aux;
  843. }
  844. writel(SLV_ADDR_SPACE_SZ,
  845. pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
  846. val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
  847. val &= ~BIT(0);
  848. writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
  849. writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
  850. writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
  851. | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
  852. AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
  853. pcie->parf + PCIE20_PARF_SYS_CTRL);
  854. writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
  855. writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
  856. writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
  857. writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
  858. val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
  859. val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
  860. writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
  861. writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
  862. PCIE20_DEVICE_CONTROL2_STATUS2);
  863. return 0;
  864. err_clk_aux:
  865. clk_disable_unprepare(res->ahb_clk);
  866. err_clk_ahb:
  867. clk_disable_unprepare(res->axi_s_clk);
  868. err_clk_axi_s:
  869. clk_disable_unprepare(res->axi_m_clk);
  870. err_clk_axi_m:
  871. clk_disable_unprepare(res->iface);
  872. err_clk_iface:
  873. /*
  874. * Not checking for failure, will anyway return
  875. * the original failure in 'ret'.
  876. */
  877. for (i = 0; i < ARRAY_SIZE(res->rst); i++)
  878. reset_control_assert(res->rst[i]);
  879. return ret;
  880. }
  881. static int qcom_pcie_link_up(struct dw_pcie *pci)
  882. {
  883. u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
  884. return !!(val & PCI_EXP_LNKSTA_DLLLA);
  885. }
  886. static int qcom_pcie_host_init(struct pcie_port *pp)
  887. {
  888. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  889. struct qcom_pcie *pcie = to_qcom_pcie(pci);
  890. int ret;
  891. pm_runtime_get_sync(pci->dev);
  892. qcom_ep_reset_assert(pcie);
  893. ret = pcie->ops->init(pcie);
  894. if (ret)
  895. return ret;
  896. ret = phy_power_on(pcie->phy);
  897. if (ret)
  898. goto err_deinit;
  899. if (pcie->ops->post_init) {
  900. ret = pcie->ops->post_init(pcie);
  901. if (ret)
  902. goto err_disable_phy;
  903. }
  904. dw_pcie_setup_rc(pp);
  905. if (IS_ENABLED(CONFIG_PCI_MSI))
  906. dw_pcie_msi_init(pp);
  907. qcom_ep_reset_deassert(pcie);
  908. ret = qcom_pcie_establish_link(pcie);
  909. if (ret)
  910. goto err;
  911. return 0;
  912. err:
  913. qcom_ep_reset_assert(pcie);
  914. if (pcie->ops->post_deinit)
  915. pcie->ops->post_deinit(pcie);
  916. err_disable_phy:
  917. phy_power_off(pcie->phy);
  918. err_deinit:
  919. pcie->ops->deinit(pcie);
  920. pm_runtime_put(pci->dev);
  921. return ret;
  922. }
  923. static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  924. u32 *val)
  925. {
  926. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  927. /* the device class is not reported correctly from the register */
  928. if (where == PCI_CLASS_REVISION && size == 4) {
  929. *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
  930. *val &= 0xff; /* keep revision id */
  931. *val |= PCI_CLASS_BRIDGE_PCI << 16;
  932. return PCIBIOS_SUCCESSFUL;
  933. }
  934. return dw_pcie_read(pci->dbi_base + where, size, val);
  935. }
/* Host ops handed to the DesignWare core via pp->ops in probe() */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	/* LTSSM enable sequence shared with the 1.0.0 variant below */
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	/* reuses the 2.1.0 LTSSM enable sequence */
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	/* only this variant provides post_init/post_deinit hooks */
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	/* reuses the 2.3.2 LTSSM enable sequence */
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	/* reuses the 2.3.2 LTSSM enable sequence */
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
/* Core DesignWare callbacks; only a link-up query is needed here */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
  980. static int qcom_pcie_probe(struct platform_device *pdev)
  981. {
  982. struct device *dev = &pdev->dev;
  983. struct resource *res;
  984. struct pcie_port *pp;
  985. struct dw_pcie *pci;
  986. struct qcom_pcie *pcie;
  987. int ret;
  988. pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
  989. if (!pcie)
  990. return -ENOMEM;
  991. pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
  992. if (!pci)
  993. return -ENOMEM;
  994. pm_runtime_enable(dev);
  995. pci->dev = dev;
  996. pci->ops = &dw_pcie_ops;
  997. pp = &pci->pp;
  998. pcie->pci = pci;
  999. pcie->ops = of_device_get_match_data(dev);
  1000. pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
  1001. if (IS_ERR(pcie->reset))
  1002. return PTR_ERR(pcie->reset);
  1003. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
  1004. pcie->parf = devm_ioremap_resource(dev, res);
  1005. if (IS_ERR(pcie->parf))
  1006. return PTR_ERR(pcie->parf);
  1007. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
  1008. pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
  1009. if (IS_ERR(pci->dbi_base))
  1010. return PTR_ERR(pci->dbi_base);
  1011. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
  1012. pcie->elbi = devm_ioremap_resource(dev, res);
  1013. if (IS_ERR(pcie->elbi))
  1014. return PTR_ERR(pcie->elbi);
  1015. pcie->phy = devm_phy_optional_get(dev, "pciephy");
  1016. if (IS_ERR(pcie->phy))
  1017. return PTR_ERR(pcie->phy);
  1018. ret = pcie->ops->get_resources(pcie);
  1019. if (ret)
  1020. return ret;
  1021. pp->root_bus_nr = -1;
  1022. pp->ops = &qcom_pcie_dw_ops;
  1023. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  1024. pp->msi_irq = platform_get_irq_byname(pdev, "msi");
  1025. if (pp->msi_irq < 0)
  1026. return pp->msi_irq;
  1027. }
  1028. ret = phy_init(pcie->phy);
  1029. if (ret) {
  1030. pm_runtime_disable(&pdev->dev);
  1031. return ret;
  1032. }
  1033. platform_set_drvdata(pdev, pcie);
  1034. ret = dw_pcie_host_init(pp);
  1035. if (ret) {
  1036. dev_err(dev, "cannot initialize host\n");
  1037. pm_runtime_disable(&pdev->dev);
  1038. return ret;
  1039. }
  1040. return 0;
  1041. }
/* Map each supported SoC compatible string to its generation's ops */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ } /* sentinel */
};
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		/* no remove() is provided, so forbid manual unbind */
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
/* built-in only: registered at device_initcall time, never unloaded */
builtin_platform_driver(qcom_pcie_driver);