pcie-designware-host.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638
  1. /*
  2. * Synopsys Designware PCIe host controller driver
  3. *
  4. * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  5. * http://www.samsung.com
  6. *
  7. * Author: Jingoo Han <jg1.han@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/irqdomain.h>
  14. #include <linux/of_address.h>
  15. #include <linux/of_pci.h>
  16. #include <linux/pci_regs.h>
  17. #include <linux/platform_device.h>
  18. #include "pcie-designware.h"
/* Forward declaration; the ops table is defined after the accessors below. */
static struct pci_ops dw_pcie_ops;
  20. static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  21. u32 *val)
  22. {
  23. struct dw_pcie *pci;
  24. if (pp->ops->rd_own_conf)
  25. return pp->ops->rd_own_conf(pp, where, size, val);
  26. pci = to_dw_pcie_from_pp(pp);
  27. return dw_pcie_read(pci->dbi_base + where, size, val);
  28. }
  29. static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
  30. u32 val)
  31. {
  32. struct dw_pcie *pci;
  33. if (pp->ops->wr_own_conf)
  34. return pp->ops->wr_own_conf(pp, where, size, val);
  35. pci = to_dw_pcie_from_pp(pp);
  36. return dw_pcie_write(pci->dbi_base + where, size, val);
  37. }
/*
 * irq_chip shared by all MSI vectors; enable/disable and mask/unmask are
 * delegated to the generic PCI MSI mask helpers.
 */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	/*
	 * Each MSI controller bank covers 32 vectors; its STATUS register
	 * is spaced 12 bytes from the previous bank's.
	 */
	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		/* Walk every pending bit within this 32-vector bank. */
		while ((pos = find_next_bit((unsigned long *) &val, 32,
					    pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			/*
			 * Clear the status bit in the controller before
			 * dispatching the handler (presumably write-1-to-
			 * clear semantics -- per DW register layout).
			 */
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
  69. void dw_pcie_msi_init(struct pcie_port *pp)
  70. {
  71. u64 msi_target;
  72. pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
  73. msi_target = virt_to_phys((void *)pp->msi_data);
  74. /* program the msi_data */
  75. dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
  76. (u32)(msi_target & 0xffffffff));
  77. dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
  78. (u32)(msi_target >> 32 & 0xffffffff));
  79. }
  80. static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
  81. {
  82. unsigned int res, bit, val;
  83. res = (irq / 32) * 12;
  84. bit = irq % 32;
  85. dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
  86. val &= ~(1 << bit);
  87. dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
  88. }
  89. static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
  90. unsigned int nvec, unsigned int pos)
  91. {
  92. unsigned int i;
  93. for (i = 0; i < nvec; i++) {
  94. irq_set_msi_desc_off(irq_base, i, NULL);
  95. /* Disable corresponding interrupt on MSI controller */
  96. if (pp->ops->msi_clear_irq)
  97. pp->ops->msi_clear_irq(pp, pos + i);
  98. else
  99. dw_pcie_msi_clear_irq(pp, pos + i);
  100. }
  101. bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
  102. }
  103. static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
  104. {
  105. unsigned int res, bit, val;
  106. res = (irq / 32) * 12;
  107. bit = irq % 32;
  108. dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
  109. val |= 1 << bit;
  110. dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
  111. }
/*
 * assign_irq - reserve a naturally-aligned power-of-two block of no_irqs
 * MSI vectors, bind them to @desc and enable them in the controller.
 *
 * On success returns the first Linux IRQ number and stores the first
 * hwirq position in *pos; on failure returns -ENOSPC (with *pos holding
 * whatever bitmap_find_free_region produced).
 */
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	/* Multi-MSI needs an aligned power-of-two region of vectors. */
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */
	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* Roll back the i vectors already bound. */
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/*Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	/* log2 of the vector count, as the MSI capability encodes it */
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
  149. static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
  150. {
  151. struct msi_msg msg;
  152. u64 msi_target;
  153. if (pp->ops->get_msi_addr)
  154. msi_target = pp->ops->get_msi_addr(pp);
  155. else
  156. msi_target = virt_to_phys((void *)pp->msi_data);
  157. msg.address_lo = (u32)(msi_target & 0xffffffff);
  158. msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
  159. if (pp->ops->get_msi_data)
  160. msg.data = pp->ops->get_msi_data(pp, pos);
  161. else
  162. msg.data = pos;
  163. pci_write_msi_msg(irq, &msg);
  164. }
  165. static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
  166. struct msi_desc *desc)
  167. {
  168. int irq, pos;
  169. struct pcie_port *pp = pdev->bus->sysdata;
  170. if (desc->msi_attrib.is_msix)
  171. return -EINVAL;
  172. irq = assign_irq(1, desc, &pos);
  173. if (irq < 0)
  174. return irq;
  175. dw_msi_setup_msg(pp, irq, pos);
  176. return 0;
  177. }
/*
 * msi_controller::setup_irqs - multi-vector MSI setup. Only plain MSI is
 * supported; the whole body collapses to -EINVAL when CONFIG_PCI_MSI is off.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* Multi-MSI uses one msi_desc describing all nvec vectors. */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
  199. static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
  200. {
  201. struct irq_data *data = irq_get_irq_data(irq);
  202. struct msi_desc *msi = irq_data_get_msi_desc(data);
  203. struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);
  204. clear_irq_range(pp, irq, 1, data->hwirq);
  205. }
/* msi_controller handed to the PCI core at root-bus scan time. */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};
/*
 * irq_domain map callback: wire each hwirq to the shared MSI irq_chip with
 * a simple flow handler, and stash the domain's host data as chip data.
 */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
/* Ops for the linear MSI IRQ domain created in dw_pcie_host_init(). */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
/*
 * dw_pcie_host_init - bring up the host bridge: parse platform/DT resources,
 * map DBI and CFG0/CFG1 spaces, create the MSI domain and scan the root bus.
 *
 * Returns 0 on success or a negative errno; on failure the acquired bridge
 * resource list is freed.
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	/*
	 * Config space may come from a dedicated "config" reg entry; the
	 * lower half becomes CFG0 and the upper half CFG1.
	 */
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		/*
		 * NOTE(review): this only warns -- execution continues and may
		 * later dereference pp->cfg if the DT ranges also provide no
		 * config window; confirm every platform supplies one.
		 */
		dev_err(dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				/* Non-fatal: drop the I/O window and go on. */
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			/* Typeless entry: config space described in DT ranges. */
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	/* Platforms may have mapped the DBI registers already. */
	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
					pp->cfg1_base,
					pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	/* Default to two ATU viewports when DT does not say otherwise. */
	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			/*
			 * Pre-create all mappings so assign_irq() never has
			 * to allocate irq descs at request time.
			 */
			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = dev;
	} else
		bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}
/*
 * Read config space of a device below the root port. The shared outbound
 * ATU viewport (INDEX1) is retargeted at the device's config space for the
 * duration of the access.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for immediate children of the root port, CFG1 further down. */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	/*
	 * With only two viewports, INDEX1 is shared between config and I/O;
	 * restore the I/O translation after the config access.
	 */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
/*
 * Write config space of a device below the root port. Mirrors
 * dw_pcie_rd_other_conf(): retarget the shared ATU viewport (INDEX1) at
 * the device's config space, do the access, then restore it.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for immediate children of the root port, CFG1 further down. */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	/*
	 * With only two viewports, INDEX1 is shared between config and I/O;
	 * restore the I/O translation after the config access.
	 */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
  429. static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
  430. int dev)
  431. {
  432. struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
  433. /* If there is no link, then there is no device */
  434. if (bus->number != pp->root_bus_nr) {
  435. if (!dw_pcie_link_up(pci))
  436. return 0;
  437. }
  438. /* access only one slot on each root port */
  439. if (bus->number == pp->root_bus_nr && dev > 0)
  440. return 0;
  441. return 1;
  442. }
  443. static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  444. int size, u32 *val)
  445. {
  446. struct pcie_port *pp = bus->sysdata;
  447. if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
  448. *val = 0xffffffff;
  449. return PCIBIOS_DEVICE_NOT_FOUND;
  450. }
  451. if (bus->number == pp->root_bus_nr)
  452. return dw_pcie_rd_own_conf(pp, where, size, val);
  453. return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
  454. }
  455. static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  456. int where, int size, u32 val)
  457. {
  458. struct pcie_port *pp = bus->sysdata;
  459. if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
  460. return PCIBIOS_DEVICE_NOT_FOUND;
  461. if (bus->number == pp->root_bus_nr)
  462. return dw_pcie_wr_own_conf(pp, where, size, val);
  463. return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
  464. }
/* Config-space accessors registered with the PCI core for the root bus. */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
  469. static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
  470. {
  471. u32 val;
  472. val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
  473. if (val == 0xffffffff)
  474. return 1;
  475. return 0;
  476. }
/*
 * dw_pcie_setup_rc - program root-complex defaults: BARs, interrupt pin,
 * bus numbers, command register, outbound ATU windows and bridge class,
 * then request a link speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins: set interrupt pin field (byte 1) to 1 */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* setup bus numbers: primary 0, secondary 1, subordinate 1 */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		/*
		 * A dedicated I/O window only exists with more than two
		 * viewports; otherwise rd/wr_other_conf re-program INDEX1
		 * on the fly.
		 */
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}