/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>

/* Bridge core config registers */
#define BRCFG_PCIE_RX0			0x00000000
#define BRCFG_INTERRUPT			0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER	0x00000020

/* Egress - Bridge translation registers */
#define E_BREG_CAPABILITIES		0x00000200
#define E_BREG_CONTROL			0x00000208
#define E_BREG_BASE_LO			0x00000210
#define E_BREG_BASE_HI			0x00000214
#define E_ECAM_CAPABILITIES		0x00000220
#define E_ECAM_CONTROL			0x00000228
#define E_ECAM_BASE_LO			0x00000230
#define E_ECAM_BASE_HI			0x00000234

/* Ingress - address translations */
#define I_MSII_CAPABILITIES		0x00000300
#define I_MSII_CONTROL			0x00000308
#define I_MSII_BASE_LO			0x00000310
#define I_MSII_BASE_HI			0x00000314

#define I_ISUB_CONTROL			0x000003E8
#define SET_ISUB_CONTROL		BIT(0)

/* Rxed msg fifo - Interrupt status registers */
#define MSGF_MISC_STATUS		0x00000400
#define MSGF_MISC_MASK			0x00000404
#define MSGF_LEG_STATUS			0x00000420
#define MSGF_LEG_MASK			0x00000424
#define MSGF_MSI_STATUS_LO		0x00000440
#define MSGF_MSI_STATUS_HI		0x00000444
#define MSGF_MSI_MASK_LO		0x00000448
#define MSGF_MSI_MASK_HI		0x0000044C

/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD		BIT(1)
#define CFG_ENABLE_INT_MSG_FWD		BIT(2)
#define CFG_ENABLE_ERR_MSG_FWD		BIT(3)
#define CFG_ENABLE_SLT_MSG_FWD		BIT(5)
#define CFG_ENABLE_VEN_MSG_FWD		BIT(7)
#define CFG_ENABLE_OTH_MSG_FWD		BIT(13)
#define CFG_ENABLE_VEN_MSG_EN		BIT(14)
#define CFG_ENABLE_VEN_MSG_VEN_INV	BIT(15)
#define CFG_ENABLE_VEN_MSG_VEN_ID	GENMASK(31, 16)
#define CFG_ENABLE_MSG_FILTER_MASK	(CFG_ENABLE_PM_MSG_FWD | \
					CFG_ENABLE_INT_MSG_FWD | \
					CFG_ENABLE_ERR_MSG_FWD | \
					CFG_ENABLE_SLT_MSG_FWD | \
					CFG_ENABLE_VEN_MSG_FWD | \
					CFG_ENABLE_OTH_MSG_FWD | \
					CFG_ENABLE_VEN_MSG_EN | \
					CFG_ENABLE_VEN_MSG_VEN_INV | \
					CFG_ENABLE_VEN_MSG_VEN_ID)

/* Misc interrupt status mask bits */
#define MSGF_MISC_SR_RXMSG_AVAIL	BIT(0)
#define MSGF_MISC_SR_RXMSG_OVER		BIT(1)
#define MSGF_MISC_SR_SLAVE_ERR		BIT(4)
#define MSGF_MISC_SR_MASTER_ERR		BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR		BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR		BIT(7)
#define MSGF_MISC_SR_UR_DETECT		BIT(20)
#define MSGF_MISC_SR_PCIE_CORE		GENMASK(18, 16)
#define MSGF_MISC_SR_PCIE_CORE_ERR	GENMASK(31, 22)
#define MSGF_MISC_SR_MASKALL		(MSGF_MISC_SR_RXMSG_AVAIL | \
					MSGF_MISC_SR_RXMSG_OVER | \
					MSGF_MISC_SR_SLAVE_ERR | \
					MSGF_MISC_SR_MASTER_ERR | \
					MSGF_MISC_SR_I_ADDR_ERR | \
					MSGF_MISC_SR_E_ADDR_ERR | \
					MSGF_MISC_SR_UR_DETECT | \
					MSGF_MISC_SR_PCIE_CORE | \
					MSGF_MISC_SR_PCIE_CORE_ERR)

/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA		BIT(0)
#define MSGF_LEG_SR_INTB		BIT(1)
#define MSGF_LEG_SR_INTC		BIT(2)
#define MSGF_LEG_SR_INTD		BIT(3)
#define MSGF_LEG_SR_MASKALL		(MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
					MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)

/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK		BIT(0)
#define MSGF_MSI_SR_HI_MASK		BIT(0)

#define MSII_PRESENT			BIT(0)
#define MSII_ENABLE			BIT(0)
#define MSII_STATUS_ENABLE		BIT(15)

/* Bridge config interrupt mask */
#define BRCFG_INTERRUPT_MASK		BIT(0)
#define BREG_PRESENT			BIT(0)
#define BREG_ENABLE			BIT(0)
#define BREG_ENABLE_FORCE		BIT(1)

/* E_ECAM status mask bits */
#define E_ECAM_PRESENT			BIT(0)
#define E_ECAM_CR_ENABLE		BIT(0)
#define E_ECAM_SIZE_LOC			GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT		16
#define ECAM_BUS_LOC_SHIFT		20
#define ECAM_DEV_LOC_SHIFT		12
#define NWL_ECAM_VALUE_DEFAULT		12

#define CFG_DMA_REG_BAR			GENMASK(2, 0)
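
/* Two 32-bit MSI status registers (LO/HI) provide 64 MSI vectors */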
#define INT_PCI_MSI_NR			(2 * 32)
#define INTX_NUM			4

/* Reading the PS_LINKUP register */
#define PS_LINKUP_OFFSET		0x00000238
#define PCIE_PHY_LINKUP_BIT		BIT(0)
#define PHY_RDY_LINKUP_BIT		BIT(1)

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

struct nwl_msi {			/* MSI information */
	struct irq_domain *msi_domain;
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

struct nwl_pcie {
	struct device *dev;
	void __iomem *breg_base;
	void __iomem *pcireg_base;
	void __iomem *ecam_base;
	phys_addr_t phys_breg_base;	/* Physical Bridge Register Base */
	phys_addr_t phys_pcie_reg_base;	/* Physical PCIe Controller Base */
	phys_addr_t phys_ecam_base;	/* Physical Configuration Base */
	u32 breg_size;
	u32 pcie_reg_size;
	u32 ecam_size;
	int irq_intx;
	int irq_misc;
	u32 ecam_value;
	u8 last_busno;
	u8 root_busno;
	struct nwl_msi msi;
	struct irq_domain *legacy_irq_domain;
};

static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
	return readl(pcie->breg_base + off);
}

static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
	writel(val, pcie->breg_base + off);
}

static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
		return true;
	return false;
}

static bool nwl_phy_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
		return true;
	return false;
}

static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (nwl_phy_link_up(pcie))
			return 0;
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pcie->dev, "PHY link never came up\n");
	return -ETIMEDOUT;
}

static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	struct nwl_pcie *pcie = bus->sysdata;

	/* Check link before accessing downstream ports */
	if (bus->number != pcie->root_busno) {
		if (!nwl_pcie_link_up(pcie))
			return false;
	}

	/* Only one device down on each root port */
	if (bus->number == pcie->root_busno && devfn > 0)
		return false;

	return true;
}

/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the requested configuration space, or NULL
 * when the device is not valid or the link is down.
 */
static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct nwl_pcie *pcie = bus->sysdata;
	int relbus;

	if (!nwl_pcie_valid_device(bus, devfn))
		return NULL;
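
	/*
	 * Standard ECAM offset layout: bits [27:20] select the bus,
	 * bits [19:12] the device/function, and the low 12 bits the
	 * register, giving each function a 4K configuration window.
	 */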
	relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
		 (devfn << ECAM_DEV_LOC_SHIFT);

	return pcie->ecam_base + relbus + where;
}

/* PCIe operations */
static struct pci_ops nwl_pcie_ops = {
	.map_bus = nwl_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
	struct nwl_pcie *pcie = data;
	u32 misc_stat;

	/* Checking for misc interrupts */
	misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
				     MSGF_MISC_SR_MASKALL;
	if (!misc_stat)
		return IRQ_NONE;

	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
		dev_err(pcie->dev, "Received Message FIFO Overflow\n");

	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
		dev_err(pcie->dev, "Slave error\n");

	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
		dev_err(pcie->dev, "Master error\n");

	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
		dev_err(pcie->dev, "Misc ingress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
		dev_err(pcie->dev, "Misc egress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_PCIE_CORE_ERR)
		dev_err(pcie->dev, "PCIe Core error\n");

	/* Clear misc interrupt status */
	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

	return IRQ_HANDLED;
}

static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);

	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
				MSGF_LEG_SR_MASKALL) != 0) {
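		/*
		 * Status bit 0 is INTA, but INTX hwirqs are 1-based in
		 * the legacy domain, hence the "bit + 1" in the lookup.
		 */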
		for_each_set_bit(bit, &status, INTX_NUM) {
			virq = irq_find_mapping(pcie->legacy_irq_domain,
						bit + 1);
			if (virq)
				generic_handle_irq(virq);
		}
	}
	chained_irq_exit(chip, desc);
}

static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
	struct nwl_msi *msi;
	unsigned long status;
	u32 bit;
	u32 virq;

	msi = &pcie->msi;

	while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
		for_each_set_bit(bit, &status, 32) {
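			/*
			 * Clear the status bit (write-one-to-clear) before
			 * dispatching, so an MSI that arrives while the
			 * handler runs re-sets the bit and is caught on
			 * the next pass of the outer loop.
			 */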
			nwl_bridge_writel(pcie, 1 << bit, status_reg);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}
}

static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
	chained_irq_exit(chip, desc);
}

static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
	chained_irq_exit(chip, desc);
}

static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops legacy_domain_ops = {
	.map = nwl_legacy_map,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
	.name = "nwl_pcie:msi",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info nwl_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip = &nwl_msi_irq_chip,
};
#endif

static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
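
	/*
	 * Endpoints write their MSIs to the controller's own register
	 * aperture; the MSI-II ingress window programmed in
	 * nwl_pcie_enable_msi() points at this same address, so the
	 * write is captured by the bridge rather than hitting memory.
	 */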
	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static int nwl_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip nwl_irq_chip = {
	.name = "Xilinx MSI",
	.irq_compose_msi_msg = nwl_compose_msi_msg,
	.irq_set_affinity = nwl_msi_set_affinity,
};

static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	struct nwl_pcie *pcie = domain->host_data;
	struct nwl_msi *msi = &pcie->msi;
	int bit;
	int i;

	mutex_lock(&msi->lock);
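	/*
	 * Multi-MSI requires a contiguous block of vectors, so look for
	 * nr_irqs free bits in a single run of the bitmap.
	 */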
	bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
					 nr_irqs, 0);
	if (bit >= INT_PCI_MSI_NR) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	bitmap_set(msi->bitmap, bit, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct nwl_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc = nwl_irq_domain_alloc,
	.free = nwl_irq_domain_free,
};

static void nwl_msi_free_irq_domain(struct nwl_pcie *pcie)
{
	struct nwl_msi *msi = &pcie->msi;

	if (msi->irq_msi0)
		irq_set_chained_handler_and_data(msi->irq_msi0, NULL, NULL);

	if (msi->irq_msi1)
		irq_set_chained_handler_and_data(msi->irq_msi1, NULL, NULL);

	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);

	if (msi->dev_domain)
		irq_domain_remove(msi->dev_domain);

	kfree(msi->bitmap);
	msi->bitmap = NULL;
}

static void nwl_pcie_free_irq_domain(struct nwl_pcie *pcie)
{
	int i;
	u32 irq;

	for (i = 0; i < INTX_NUM; i++) {
		irq = irq_find_mapping(pcie->legacy_irq_domain, i + 1);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	if (pcie->legacy_irq_domain)
		irq_domain_remove(pcie->legacy_irq_domain);

	nwl_msi_free_irq_domain(pcie);
}

static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
	struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
	struct nwl_msi *msi = &pcie->msi;

	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
						&dev_msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(pcie->dev, "failed to create dev IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &nwl_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(pcie->dev, "failed to create msi IRQ domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}
#endif
	return 0;
}

static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
	struct device_node *node = pcie->dev->of_node;
	struct device_node *legacy_intc_node;

	legacy_intc_node = of_get_next_child(node, NULL);
	if (!legacy_intc_node) {
		dev_err(pcie->dev, "No legacy intc node found\n");
		return -EINVAL;
	}

	/* INTX hwirqs are 1-based, so the domain must cover INTX_NUM + 1 */
	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
							INTX_NUM + 1,
							&legacy_domain_ops,
							pcie);
	if (!pcie->legacy_irq_domain) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	return nwl_pcie_init_msi_irq_domain(pcie);
}

static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct nwl_msi *msi = &pcie->msi;
	unsigned long base;
	int ret;
	int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);

	mutex_init(&msi->lock);

	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	/* Get msi_1 IRQ number */
	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (msi->irq_msi1 < 0) {
		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
		ret = -EINVAL;
		goto err;
	}
	irq_set_chained_handler_and_data(msi->irq_msi1,
					 nwl_pcie_msi_handler_high, pcie);

	/* Get msi_0 IRQ number */
	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (msi->irq_msi0 < 0) {
		dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
		ret = -EINVAL;
		goto err;
	}
	irq_set_chained_handler_and_data(msi->irq_msi0,
					 nwl_pcie_msi_handler_low, pcie);

	/* Check for msii_present bit */
	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
	if (!ret) {
		dev_err(pcie->dev, "MSI not present\n");
		ret = -EIO;
		goto err;
	}

	/* Enable MSII */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_ENABLE, I_MSII_CONTROL);

	/* Enable MSII status */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_STATUS_ENABLE, I_MSII_CONTROL);

	/*
	 * Set up the MSI-II ingress window: point it at the PCIe
	 * controller register base, the address that
	 * nwl_compose_msi_msg() hands out to endpoints.
	 */
	base = pcie->phys_pcie_reg_base;
	nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

	/*
	 * For high range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
			  MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);
	nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

	/*
	 * For low range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
			  MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);
	nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

	return 0;
err:
	kfree(msi->bitmap);
	msi->bitmap = NULL;
	return ret;
}

static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	u32 breg_val, ecam_val, first_busno = 0;
	int err;

	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
	if (!breg_val) {
		dev_err(pcie->dev, "BREG is not present\n");
		return -ENODEV;
	}

	/* Write bridge_off to breg base */
	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_HI);

	/* Enable BREG */
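	/*
	 * ~BREG_ENABLE_FORCE & BREG_ENABLE evaluates to plain BREG_ENABLE:
	 * the bridge registers are enabled without the force bit.
	 */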
	nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
			  E_BREG_CONTROL);

	/* Disable DMA channel registers */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
			  CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);

	/* Enable Ingress subtractive decode translation */
	nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);

	/* Enable msg filtering details */
	nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
			  BRCFG_PCIE_RX_MSG_FILTER);

	err = nwl_wait_for_link(pcie);
	if (err)
		return err;

	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
	if (!ecam_val) {
		dev_err(pcie->dev, "ECAM is not present\n");
		return -ENODEV;
	}

	/* Enable ECAM */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
			  E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_HI);

	/* Get bus range */
	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
	pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;

	/*
	 * Write primary, secondary and subordinate bus numbers; the
	 * PCI_PRIMARY_BUS dword carries them in bits 7:0, 15:8 and 23:16
	 * (the subordinate-bus shift happens to equal E_ECAM_SIZE_SHIFT).
	 */
	ecam_val = first_busno;
	ecam_val |= (first_busno + 1) << 8;
	ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));

	if (nwl_pcie_link_up(pcie))
		dev_info(pcie->dev, "Link is UP\n");
	else
		dev_info(pcie->dev, "Link is DOWN\n");

	/* Get misc IRQ number */
	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
	if (pcie->irq_misc < 0) {
		dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
			pcie->irq_misc);
		return -EINVAL;
	}

	err = devm_request_irq(pcie->dev, pcie->irq_misc,
			       nwl_pcie_misc_handler, IRQF_SHARED,
			       "nwl_pcie:misc", pcie);
	if (err) {
		dev_err(pcie->dev, "failed to register misc IRQ#%d\n",
			pcie->irq_misc);
		return err;
	}

	/* Disable all misc interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Clear pending misc interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
			  MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);

	/* Enable all misc interrupts */
	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Disable all legacy interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Clear pending legacy interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);

	/* Enable all legacy interrupts */
	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Enable the bridge config interrupt */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
			  BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);

	return 0;
}

static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
			     struct platform_device *pdev)
{
	struct device_node *node = pcie->dev->of_node;
	struct resource *res;
	const char *type;

	/* Check for device type */
	type = of_get_property(node, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
	pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
	if (IS_ERR(pcie->breg_base))
		return PTR_ERR(pcie->breg_base);
	pcie->phys_breg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
	pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
	if (IS_ERR(pcie->pcireg_base))
		return PTR_ERR(pcie->pcireg_base);
	pcie->phys_pcie_reg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
	if (IS_ERR(pcie->ecam_base))
		return PTR_ERR(pcie->ecam_base);
	pcie->phys_ecam_base = res->start;

	/* Get intx IRQ number */
	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
	if (pcie->irq_intx < 0) {
		dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
			pcie->irq_intx);
		return -EINVAL;
	}

	irq_set_chained_handler_and_data(pcie->irq_intx,
					 nwl_pcie_leg_handler, pcie);

	return 0;
}

static const struct of_device_id nwl_pcie_of_match[] = {
	{ .compatible = "xlnx,nwl-pcie-2.11", },
	{}
};

static int nwl_pcie_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct nwl_pcie *pcie;
	struct pci_bus *bus;
	struct pci_bus *child;
	int err;
	resource_size_t iobase = 0;
	LIST_HEAD(res);

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;
	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

	err = nwl_pcie_parse_dt(pcie, pdev);
	if (err) {
		dev_err(pcie->dev, "Parsing DT failed\n");
		return err;
	}

	err = nwl_pcie_bridge_init(pcie);
	if (err) {
		dev_err(pcie->dev, "HW initialization failed\n");
		return err;
	}

	err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
	if (err) {
		dev_err(pcie->dev, "Getting bridge resources failed\n");
		return err;
	}

	err = nwl_pcie_init_irq_domain(pcie);
	if (err) {
		dev_err(pcie->dev, "Failed creating IRQ Domain\n");
		return err;
	}

	bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
				  &nwl_pcie_ops, pcie, &res);
	if (!bus)
		return -ENOMEM;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = nwl_pcie_enable_msi(pcie, bus);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n", err);
			return err;
		}
	}

	pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);
	platform_set_drvdata(pdev, pcie);

	return 0;
}

static int nwl_pcie_remove(struct platform_device *pdev)
{
	struct nwl_pcie *pcie = platform_get_drvdata(pdev);

	nwl_pcie_free_irq_domain(pcie);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver nwl_pcie_driver = {
	.driver = {
		.name = "nwl-pcie",
		.of_match_table = nwl_pcie_of_match,
	},
	.probe = nwl_pcie_probe,
	.remove = nwl_pcie_remove,
};
module_platform_driver(nwl_pcie_driver);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("NWL PCIe driver");
MODULE_LICENSE("GPL");