pcie-designware.c

/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/delay.h>

#include "pcie-designware.h"

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
#define PORT_LINK_MODE_8_LANES		(0xf << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	0x00000010

static struct pci_ops dw_pcie_ops;

int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4)
		*val = readl(addr);
	else if (size == 2)
		*val = readw(addr);
	else if (size == 1)
		*val = readb(addr);
	else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
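
/*
 * Editorial example (not part of the original file): the two accessors above
 * reject accesses that are not naturally aligned, so callers pass an address
 * already offset to the register of interest.  A hypothetical caller reading
 * the 16-bit vendor ID from the root complex's own config space might do:
 *
 *	u32 vendor;
 *
 *	if (dw_pcie_cfg_read(pp->dbi_base + PCI_VENDOR_ID, 2, &vendor))
 *		dev_err(pp->dev, "config read failed\n");
 */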

static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
	if (pp->ops->readl_rc)
		pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
	else
		*val = readl(pp->dbi_base + reg);
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
	else
		writel(val, pp->dbi_base + reg);
}

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
}

static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 val;

	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);
}
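
/*
 * Editorial note (not part of the original file): the viewport register
 * selects which iATU region the subsequent BASE/LIMIT/TARGET/CR writes apply
 * to, so programming a window is a sequence of writes through one shared set
 * of registers rather than a per-region register bank.  As an illustration
 * only, mapping a CFG0 window at pp->cfg0_base that targets bus 1, device 0,
 * function 0 would be:
 *
 *	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
 *				  PCIE_ATU_TYPE_CFG0, pp->cfg0_base,
 *				  PCIE_ATU_BUS(1) | PCIE_ATU_DEV(0) |
 *				  PCIE_ATU_FUNC(0), pp->cfg0_size);
 *
 * which is the pattern dw_pcie_rd_other_conf() uses below.
 */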

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    (u32 *)&val);
		if (val) {
			ret = IRQ_HANDLED;
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
				irq = irq_find_mapping(pp->irq_domain,
						       i * 32 + pos);
				dw_pcie_wr_own_conf(pp,
						    PCIE_MSI_INTR0_STATUS + i * 12,
						    4, 1 << pos);
				generic_handle_irq(irq);
				pos++;
			}
		}
	}

	return ret;
}
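
/*
 * Illustrative usage (not part of the original file): a platform driver that
 * routes the controller's MSI output to a SoC interrupt typically calls
 * dw_handle_msi_irq() from its own handler.  A minimal sketch, assuming a
 * hypothetical "foo" platform driver whose probe registered the handler with
 * devm_request_irq(dev, msi_irq, foo_pcie_msi_handler, IRQF_SHARED,
 * "foo-pcie-msi", pp):
 *
 *	static irqreturn_t foo_pcie_msi_handler(int irq, void *arg)
 *	{
 *		struct pcie_port *pp = arg;
 *
 *		return dw_handle_msi_irq(pp);
 *	}
 */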

void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}
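
/*
 * Editorial note (not part of the original file): the page allocated above
 * only serves as a unique physical address for endpoints to target with
 * their MSI writes; the controller is expected to trap inbound writes to the
 * address programmed into PCIE_MSI_ADDR_LO/HI and latch the payload into
 * PCIE_MSI_INTR0_STATUS, so the page contents themselves are never read by
 * this driver.
 */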

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);

	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}
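
/*
 * Illustrative usage (not part of the original file): platform glue normally
 * starts link training through its own SoC-specific path and then polls with
 * this helper.  A minimal sketch, with foo_pcie_ltssm_enable() standing in
 * for a hypothetical "start link training" hook:
 *
 *	static int foo_pcie_establish_link(struct pcie_port *pp)
 *	{
 *		if (dw_pcie_link_up(pp))
 *			return 0;
 *
 *		foo_pcie_ltssm_enable(pp);
 *
 *		return dw_pcie_wait_for_link(pp);
 *	}
 */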

int dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val;

	if (pp->ops->link_up)
		return pp->ops->link_up(pp);

	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
	return val & PCIE_PHY_DEBUG_R1_LINK_UP;
}

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	u32 val;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
			ret = pci_remap_iospace(pp->io, pp->io_base);
			if (ret) {
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, pp->io);
				continue;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		default:
			continue;
		}
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					    resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				return -ENXIO;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus)
		return -ENOMEM;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;
}
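
/*
 * Illustrative usage (not part of the original file): a platform driver
 * fills in struct pcie_host_ops with its SoC-specific hooks and then hands
 * the port to dw_pcie_host_init().  A minimal sketch, with
 * foo_pcie_host_init() and foo_pcie_link_up() standing in for hypothetical
 * platform callbacks:
 *
 *	static struct pcie_host_ops foo_pcie_host_ops = {
 *		.link_up = foo_pcie_link_up,
 *		.host_init = foo_pcie_host_init,
 *	};
 *
 *	static int foo_add_pcie_port(struct pcie_port *pp,
 *				     struct platform_device *pdev)
 *	{
 *		pp->dev = &pdev->dev;
 *		pp->ops = &foo_pcie_host_ops;
 *
 *		return dw_pcie_host_init(pp);
 *	}
 */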

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_valid_config(struct pcie_port *pp,
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup memory base, memory limit */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

	/* setup command register */
	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}
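
/*
 * Illustrative usage (not part of the original file): dw_pcie_setup_rc() is
 * typically invoked from a platform's ->host_init() callback, together with
 * dw_pcie_msi_init() when MSI is enabled.  A minimal sketch, with
 * foo_pcie_establish_link() standing in for the hypothetical link-up helper
 * sketched earlier:
 *
 *	static void foo_pcie_host_init(struct pcie_port *pp)
 *	{
 *		dw_pcie_setup_rc(pp);
 *		foo_pcie_establish_link(pp);
 *
 *		if (IS_ENABLED(CONFIG_PCI_MSI))
 *			dw_pcie_msi_init(pp);
 *	}
 */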

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");