pcie-qcom.c

/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"
#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_COMMAND_STATUS			0x04
#define CMD_BME_VAL				0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10

#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE20_CAP				0x70
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1
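/*
 * Setting DBI_RO_WR_EN in MISC_CONTROL_1 makes otherwise read-only DBI
 * registers writable; qcom_pcie_init_2_3_3() relies on this when it
 * updates the link capability registers.
 */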
#define PERST_DELAY_US				1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
};

struct qcom_pcie_resources_2_4_0 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};
struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;		/* DT parf */
	void __iomem *elbi;		/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)	dev_get_drvdata((x)->dev)
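/*
 * PERST# is active low. The GPIO is requested with GPIOD_OUT_LOW in
 * probe(), so writing a logical 1 here asserts reset towards the
 * endpoint and 0 releases it; the sleep gives the signal time to
 * settle before we carry on.
 */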
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}
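/*
 * How link training is kicked off is IP-specific: the 2.1.0 and 1.0.0
 * parts set LT_ENABLE in the ELBI SYS_CTRL register, while the newer
 * variants set bit 8 of PARF_LTSSM, so each ops table supplies its own
 * ltssm_enable() hook.
 */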
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_disable(res->vdda);
	regulator_disable(res->vdda_phy);
	regulator_disable(res->vdda_refclk);
}
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		return ret;
	}

	ret = regulator_enable(res->vdda_refclk);
	if (ret) {
		dev_err(dev, "cannot enable vdda_refclk regulator\n");
		goto err_refclk;
	}

	ret = regulator_enable(res->vdda_phy);
	if (ret) {
		dev_err(dev, "cannot enable vdda_phy regulator\n");
		goto err_vdda_phy;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		return ret;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_disable(res->vdda_phy);
err_vdda_phy:
	regulator_disable(res->vdda_refclk);
err_refclk:
	regulator_disable(res->vdda);

	return ret;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}
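/*
 * The pipe clock is sourced from the PHY on these parts, so it can only
 * be enabled once the PHY has been powered on; that is why it lives in
 * post_init() below (mirrored by post_deinit()) rather than in init().
 */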
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
								 "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
								"axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);
	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_clk_axi_s;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clk_axi_s:
	clk_disable_unprepare(res->master_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->aux_clk);
err_clk_aux:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);

	return ret;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
		PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}
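/*
 * Link state is taken from the Data Link Layer Link Active bit in the
 * standard Link Status register of the PCIe capability, which sits at
 * DBI offset PCIE20_CAP on all of these parts.
 */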
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
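/*
 * Bring-up order: hold PERST#, run the IP-specific init, power on the
 * PHY, run post_init (pipe clock), program the root complex, release
 * PERST#, and only then start link training and wait for the link.
 */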
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}
static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pci->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};
/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
		return PTR_ERR(pcie->reset);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf))
		return PTR_ERR(pcie->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi))
		return PTR_ERR(pcie->elbi);

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		return ret;

	pp->root_bus_nr = -1;
	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;

		ret = devm_request_irq(dev, pp->msi_irq,
				       qcom_pcie_msi_irq_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "qcom-pcie-msi", pp);
		if (ret) {
			dev_err(dev, "cannot request msi irq\n");
			return ret;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		return ret;
	}

	return 0;
}
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ }
};
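/*
 * The variant is selected purely by the board's compatible string. As a
 * purely illustrative sketch (node address, reg values and GPIO wiring
 * hypothetical; only the "parf"/"dbi"/"elbi" reg-names, the "pciephy"
 * phy-name and the "perst" GPIO are what probe() above looks up):
 *
 *	pcie@600000 {
 *		compatible = "qcom,pcie-msm8996";
 *		reg = <0x600000 0x2000>, <0x608000 0x1000>, <0x610000 0x1000>;
 *		reg-names = "parf", "dbi", "elbi";
 *		phys = <&pciephy>;
 *		phy-names = "pciephy";
 *		perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
 *	};
 */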
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);