pcie-designware-host.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

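/*
 * Accessors for the host's own (root port) config space. A platform driver
 * may override these through pcie_host_ops; otherwise the registers are
 * accessed directly through the DBI region.
 */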
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
						(i * MSI_REG_CTRL_BLOCK_SIZE),
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

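/*
 * Compose the MSI message for a vector: the address is the doorbell that
 * endpoints write to (the DMA address set up in dw_pcie_msi_init(), unless
 * the platform supplies get_msi_addr), and the data is the hardware IRQ
 * number unless overridden by get_msi_data.
 */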
static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	if (pp->ops->get_msi_data)
		msg->data = pp->ops->get_msi_data(pp, data->hwirq);
	else
		msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

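/*
 * Mask/unmask a vector by clearing/setting its bit in the per-controller
 * PCIE_MSI_INTR0_ENABLE register. Each controller block covers
 * MAX_MSI_IRQS_PER_CTRL vectors, and pp->irq_status[] caches the enable
 * bits written to the hardware.
 */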
static void dw_pci_bottom_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_clear_irq) {
		pp->ops->msi_clear_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_status[ctrl] &= ~(1 << bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_set_irq) {
		pp->ops->msi_set_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_status[ctrl] |= 1 << bit;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

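/*
 * Allocate nr_irqs contiguous vectors from the bitmap. Multi-MSI requires
 * a naturally aligned power-of-two block of vectors, hence
 * order_base_2(nr_irqs).
 */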
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    &dw_pci_msi_bottom_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

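/*
 * Set up the MSI doorbell: allocate a page, map it for DMA and program its
 * bus address into PCIE_MSI_ADDR_LO/HI. Endpoint writes to this address are
 * intercepted by the controller and raised as MSI interrupts; the page
 * contents are never actually read.
 */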
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		dev_err(dev, "Failed to allocate MSI data page\n");
		return;
	}

	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}

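/*
 * Common host bring-up: parse the DT resources, map the config and DBI
 * windows, set up MSI, run the platform's host_init, then scan and
 * enumerate the bus through the generic PCI core.
 */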
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		/* The "config" region is split in half: CFG0, then CFG1 */
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		goto error;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

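	/*
	 * If the platform did not map the DBI registers itself, fall back
	 * to mapping them from the "config" resource.
	 */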
	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				ret = -EINVAL;
				goto error;
			}
		}

		if (!pp->ops->msi_host_init) {
			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				goto error;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}

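/*
 * Config accesses to devices below the root port go through an outbound
 * iATU window: CFG0 for the immediate child bus, CFG1 for buses further
 * downstream. With only two viewports, region 1 is shared between config
 * and I/O, so the I/O mapping is restored after each access.
 */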
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

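/*
 * A device is only reachable if the link is up (for anything below the
 * root bus), and on the root bus itself only slot 0 exists.
 */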
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

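/*
 * Cores with "unrolled" iATU expose the ATU registers in a dedicated
 * address space instead of the indexed viewport. If the viewport register
 * reads back as all ones, it is not implemented and unroll mode is in use.
 */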
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

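/*
 * Standard root complex setup: cache the MSI enable state, program the RC
 * BARs, interrupt pin, bus numbers and command register, program the
 * default ATU mappings, fix up the class code, and initiate a link speed
 * change.
 */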
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &pp->irq_status[ctrl]);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}