// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL 0x00
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
#define SLV_ACLK_CGC_DIS BIT(9)
#define CORE_CLK_CGC_DIS BIT(6)
#define AUX_PWR_DET BIT(4)
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)

#define PCIE20_COMMAND_STATUS 0x04
#define CMD_BME_VAL 0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10

#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C

#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)

#define PCIE20_CAP 0x70
#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH 0x1AC

#define PCIE20_MISC_CONTROL_1_REG 0x8BC
#define DBI_RO_WR_EN 1

#define PERST_DELAY_US 1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
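
/*
 * The offsets above index three separate register spaces: the
 * PCIE20_PARF_* offsets apply to the PARF wrapper registers (the DT
 * "parf" region), PCIE20_ELBI_SYS_CTRL applies to the "elbi" region,
 * and the remaining PCIE20_* offsets (COMMAND_STATUS, CAP,
 * AXI_MSTR_RESP_COMP_CTRL*, MISC_CONTROL_1_REG) apply to the DesignWare
 * DBI configuration space reached through pci->dbi_base.
 */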

#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

struct qcom_pcie_resources_2_4_0 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};
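
/*
 * Hook ordering, as driven by qcom_pcie_host_init(): init() ->
 * phy_power_on() -> post_init() -> LTSSM enable/link training; on
 * failure the teardown runs post_deinit() -> phy_power_off() ->
 * deinit(). Only init() and deinit() are mandatory; post_init(),
 * post_deinit() and ltssm_enable() are optional and NULL-checked
 * before use.
 */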

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;	/* DT parf */
	void __iomem *elbi;	/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
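
/*
 * PERST# is active low on the slot; the "perst" GPIO is requested with
 * GPIOD_OUT_LOW in probe, and any electrical inversion is assumed to be
 * described by the GPIO flags in DT, so a logical 1 here asserts the
 * endpoint reset. The sleep simply gives the signal time to settle.
 */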

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}
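
/*
 * Once the LTSSM is released, the DesignWare core trains the link on
 * its own; dw_pcie_wait_for_link() polls the dw_pcie_ops.link_up hook
 * (qcom_pcie_link_up() below) until the data link layer reports active
 * or the wait times out.
 */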

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_ahb;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
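
/*
 * The error labels unwind in reverse order of acquisition, so each goto
 * target releases only what was enabled before the failing step; the
 * reset deassert failures jump to err_deassert_ahb so the already
 * enabled clocks and regulators are not leaked on those paths either.
 */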

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}
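
/*
 * BIT(8) of PCIE20_PARF_LTSSM gates link training on the newer wrapper
 * revisions; this hook is shared by the 2.3.2, 2.4.0 and 2.3.3 ops
 * tables below, while the 1.0.0 and 2.1.0 parts enable training through
 * the ELBI LT_ENABLE bit instead (qcom_pcie_2_1_0_ltssm_enable()).
 */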

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);
err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}
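
/*
 * The pipe clock is handled in post_init/post_deinit rather than in
 * init/deinit because it is presumably sourced from the PHY: it can
 * only run once phy_power_on() has completed and must be shut off
 * before phy_power_off() (see the hook ordering in
 * qcom_pcie_host_init()).
 */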

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
								 "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
								"axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_clk_axi_s;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clk_axi_s:
	clk_disable_unprepare(res->master_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->aux_clk);
err_clk_aux:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);

	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
	       | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
	       PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
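
/*
 * Link state comes straight from the standard Link Status register of
 * the PCIe capability at PCIE20_CAP in config space;
 * PCI_EXP_LNKSTA_DLLLA is the Data Link Layer Link Active bit.
 */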

static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pci->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_read(pci->dbi_base + where, size, val);
}
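
/*
 * With the fixup above, a 4-byte read of PCI_CLASS_REVISION on the root
 * port returns (PCI_CLASS_BRIDGE_PCI << 16) | revision, i.e. class code
 * 0x0604 (PCI-to-PCI bridge) with the hardware revision preserved, so
 * the PCI core enumerates the root port as a bridge.
 */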

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
		return PTR_ERR(pcie->reset);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf))
		return PTR_ERR(pcie->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi))
		return PTR_ERR(pcie->elbi);

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		return ret;

	pp->root_bus_nr = -1;
	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		return ret;
	}

	return 0;
}
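
/*
 * Probe order above: resources are only looked up (not enabled) through
 * ->get_resources(), the PHY is initialized, and the actual power-up
 * sequence runs later from dw_pcie_host_init(), which calls back into
 * qcom_pcie_host_init() via qcom_pcie_dw_ops.
 */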

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ }
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);