/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

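/*
 * Accessors for the host bridge's own (root port) configuration space.
 * A platform driver can override these through pcie_host_ops; by default
 * the DBI register space is read and written directly.
 */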
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val, 32,
					    pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			generic_handle_irq(irq);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}

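/*
 * Allocate a page solely to reserve a unique physical address for MSI
 * writes, then program that address into the controller.  Inbound MSI
 * writes are caught by the port and latched into the INTR0 status
 * registers rather than being forwarded to memory.
 */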
void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

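/*
 * Each MSI controller bank handles 32 vectors, and its ENABLE/STATUS
 * registers are laid out at a 12-byte stride; hence the
 * (irq / 32) * 12 offset arithmetic in the helpers below.
 */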
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

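/*
 * Reserve a naturally aligned, power-of-two sized block of vectors from
 * the msi_irq_in_use bitmap (as multi-message MSI requires) and enable
 * each of them.  Returns the Linux IRQ number of the first vector, or
 * -ENOSPC if no suitable block is free.
 */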
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */
	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

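/*
 * Compose the MSI message for a freshly assigned vector: the target
 * address is either platform-provided or the physical address reserved
 * in dw_pcie_msi_init(), and the data is simply the vector's position.
 */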
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

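/*
 * msi_controller callbacks, wired up through dw_pcie_msi_chip below.
 * Only (multi-)MSI is supported by this scheme; MSI-X is rejected.
 */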
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

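/*
 * Common host bringup: parse the "config" region and the DT ranges, map
 * the DBI and config windows, set up the MSI domain, run the platform's
 * host_init hook, then scan and populate the root bus.
 */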
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int i, ret;
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;
	ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		goto error;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bridge->msi = &dw_pcie_msi_chip;
		dw_pcie_msi_chip.dev = dev;
	}

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}

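/*
 * Config accesses to devices behind the root port go through an iATU
 * viewport: CFG0 cycles for the immediate child bus, CFG1 for anything
 * deeper.  When only two viewports are available, the region borrowed
 * for the config cycle is restored to I/O translation after each access.
 */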
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

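/*
 * The root bus hosts the root port alone in slot 0; accesses to higher
 * device numbers on it are rejected, since dw_pcie_rd_own_conf() would
 * otherwise make every slot alias the root port's DBI space.  Devices
 * on downstream buses are unreachable until the link has trained.
 */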
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

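/*
 * Cores built with the "unrolled" iATU register space do not implement
 * the PCIE_ATU_VIEWPORT register, so it reads back as all ones; that is
 * used here to detect unroll support.
 */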
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

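/*
 * Bring the root complex to a usable state: program the RC BARs,
 * interrupt pin, bus numbers, and command register, set up the outbound
 * ATU regions (unless the platform does its own address translation),
 * fix up the class code behind the DBI read-only gate, and request a
 * link speed change.
 */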
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Disable write permission again right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}