pcie-designware-host.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
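
/*
 * "Own" config accesses target the host bridge's own config space, which
 * the DesignWare core exposes through its DBI register window; platforms
 * with non-standard DBI access may override these through pcie_host_ops.
 */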

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
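
/*
 * Each MSI controller bank serves 32 vectors, and the banks appear to be
 * laid out at a 12-byte register stride (hence the "i * 12" offsets).
 * The handler scans every bank's STATUS register, dispatches the mapped
 * Linux IRQ for each set bit, then writes the bit back to clear it.
 */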
/* MSI interrupt handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val, 32,
					    pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			generic_handle_irq(irq);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}
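
/*
 * Set up the MSI target address. A throwaway page is allocated and
 * DMA-mapped purely to obtain a bus address that endpoints can safely
 * write to; the core is expected to intercept writes to the programmed
 * address and raise its MSI interrupt, so the page contents are never
 * actually consumed.
 */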
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return;

	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
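
/*
 * Allocate a contiguous block of MSI vectors. Multi-message MSI encodes
 * the vector number in the low bits of the message data, so the block
 * must be a naturally aligned power of two, hence the order_base_2()
 * when carving the region out of the in-use bitmap.
 */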
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */
	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
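
/*
 * Compose and write the MSI message for a newly assigned vector: the
 * address is the MSI target programmed in dw_pcie_msi_init() (or one
 * supplied by the platform), and the data defaults to the vector's
 * bitmap position, which is what dw_handle_msi_irq() demultiplexes on.
 */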
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
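
/*
 * Host bring-up sequence: parse the "config" region and DT ranges, map
 * the DBI and CFG0/CFG1 windows, create the MSI IRQ domain (unless the
 * platform supplies its own msi_host_init), run the platform's host_init
 * hook, then scan and populate the root bus.
 */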
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int i, ret;
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
					       &bridge->windows, &pp->io_base);
	if (ret)
		goto error;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
					pp->cfg1_base,
					pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bridge->msi = &dw_pcie_msi_chip;
		dw_pcie_msi_chip.dev = dev;
	}

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
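
/*
 * Config accesses to devices below the root port go through an outbound
 * iATU window: type 0 (CFG0) for the bus immediately below the root
 * port, type 1 (CFG1) for anything further downstream. With only two
 * viewports, region 1 is shared between config and I/O, so it is
 * reprogrammed back to I/O after every access.
 */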
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
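
/*
 * Filter out config accesses that cannot succeed: anything downstream
 * while the link is down, and any slot other than 0 on the root bus,
 * since this core implements a single root port.
 */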
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
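
/*
 * Cores with "unrolled" iATU support expose the ATU registers in a
 * dedicated address space instead of the indirect viewport; on such
 * cores the legacy viewport register presumably does not exist and
 * reads back as all ones, which is what this probe relies on.
 */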
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
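
/*
 * Program the root complex's standard config header and, unless the
 * platform does its own address translation, the outbound iATU: region 0
 * maps memory space, and region 2 maps I/O space when more than two
 * viewports are available (otherwise I/O shares region 1 with config
 * accesses, as handled in the rd/wr_other_conf paths above).
 */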
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only registers */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Disable write permission again right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}