// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>

/* Bridge core config registers */
#define BRCFG_PCIE_RX0			0x00000000
#define BRCFG_INTERRUPT			0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER	0x00000020

/* Egress - Bridge translation registers */
#define E_BREG_CAPABILITIES		0x00000200
#define E_BREG_CONTROL			0x00000208
#define E_BREG_BASE_LO			0x00000210
#define E_BREG_BASE_HI			0x00000214
#define E_ECAM_CAPABILITIES		0x00000220
#define E_ECAM_CONTROL			0x00000228
#define E_ECAM_BASE_LO			0x00000230
#define E_ECAM_BASE_HI			0x00000234

/* Ingress - address translations */
#define I_MSII_CAPABILITIES		0x00000300
#define I_MSII_CONTROL			0x00000308
#define I_MSII_BASE_LO			0x00000310
#define I_MSII_BASE_HI			0x00000314

#define I_ISUB_CONTROL			0x000003E8
#define SET_ISUB_CONTROL		BIT(0)

/* Rxed msg fifo - Interrupt status registers */
#define MSGF_MISC_STATUS		0x00000400
#define MSGF_MISC_MASK			0x00000404
#define MSGF_LEG_STATUS			0x00000420
#define MSGF_LEG_MASK			0x00000424
#define MSGF_MSI_STATUS_LO		0x00000440
#define MSGF_MSI_STATUS_HI		0x00000444
#define MSGF_MSI_MASK_LO		0x00000448
#define MSGF_MSI_MASK_HI		0x0000044C

/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD		BIT(1)
#define CFG_ENABLE_INT_MSG_FWD		BIT(2)
#define CFG_ENABLE_ERR_MSG_FWD		BIT(3)
#define CFG_ENABLE_MSG_FILTER_MASK	(CFG_ENABLE_PM_MSG_FWD | \
					CFG_ENABLE_INT_MSG_FWD | \
					CFG_ENABLE_ERR_MSG_FWD)

/* Misc interrupt status mask bits */
#define MSGF_MISC_SR_RXMSG_AVAIL	BIT(0)
#define MSGF_MISC_SR_RXMSG_OVER		BIT(1)
#define MSGF_MISC_SR_SLAVE_ERR		BIT(4)
#define MSGF_MISC_SR_MASTER_ERR		BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR		BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR		BIT(7)
#define MSGF_MISC_SR_FATAL_AER		BIT(16)
#define MSGF_MISC_SR_NON_FATAL_AER	BIT(17)
#define MSGF_MISC_SR_CORR_AER		BIT(18)
#define MSGF_MISC_SR_UR_DETECT		BIT(20)
#define MSGF_MISC_SR_NON_FATAL_DEV	BIT(22)
#define MSGF_MISC_SR_FATAL_DEV		BIT(23)
#define MSGF_MISC_SR_LINK_DOWN		BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH	BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH	BIT(26)

#define MSGF_MISC_SR_MASKALL		(MSGF_MISC_SR_RXMSG_AVAIL | \
					MSGF_MISC_SR_RXMSG_OVER | \
					MSGF_MISC_SR_SLAVE_ERR | \
					MSGF_MISC_SR_MASTER_ERR | \
					MSGF_MISC_SR_I_ADDR_ERR | \
					MSGF_MISC_SR_E_ADDR_ERR | \
					MSGF_MISC_SR_FATAL_AER | \
					MSGF_MISC_SR_NON_FATAL_AER | \
					MSGF_MISC_SR_CORR_AER | \
					MSGF_MISC_SR_UR_DETECT | \
					MSGF_MISC_SR_NON_FATAL_DEV | \
					MSGF_MISC_SR_FATAL_DEV | \
					MSGF_MISC_SR_LINK_DOWN | \
					MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
					MSGF_MSIC_SR_LINK_BWIDTH)

/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA		BIT(0)
#define MSGF_LEG_SR_INTB		BIT(1)
#define MSGF_LEG_SR_INTC		BIT(2)
#define MSGF_LEG_SR_INTD		BIT(3)
#define MSGF_LEG_SR_MASKALL		(MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
					MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)

/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK		GENMASK(31, 0)
#define MSGF_MSI_SR_HI_MASK		GENMASK(31, 0)

#define MSII_PRESENT			BIT(0)
#define MSII_ENABLE			BIT(0)
#define MSII_STATUS_ENABLE		BIT(15)

/* Bridge config interrupt mask */
#define BRCFG_INTERRUPT_MASK		BIT(0)
#define BREG_PRESENT			BIT(0)
#define BREG_ENABLE			BIT(0)
#define BREG_ENABLE_FORCE		BIT(1)

/* E_ECAM status mask bits */
#define E_ECAM_PRESENT			BIT(0)
#define E_ECAM_CR_ENABLE		BIT(0)
#define E_ECAM_SIZE_LOC			GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT		16
#define ECAM_BUS_LOC_SHIFT		20
#define ECAM_DEV_LOC_SHIFT		12
#define NWL_ECAM_VALUE_DEFAULT		12

#define CFG_DMA_REG_BAR			GENMASK(2, 0)

#define INT_PCI_MSI_NR			(2 * 32)

/* Reading the PS_LINKUP register */
#define PS_LINKUP_OFFSET		0x00000238
#define PCIE_PHY_LINKUP_BIT		BIT(0)
#define PHY_RDY_LINKUP_BIT		BIT(1)

/* Parameters for the link-up wait loop */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

struct nwl_msi {			/* MSI information */
	struct irq_domain *msi_domain;
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

struct nwl_pcie {
	struct device *dev;
	void __iomem *breg_base;
	void __iomem *pcireg_base;
	void __iomem *ecam_base;
	phys_addr_t phys_breg_base;	/* Physical Bridge Register Base */
	phys_addr_t phys_pcie_reg_base;	/* Physical PCIe Controller Base */
	phys_addr_t phys_ecam_base;	/* Physical Configuration Base */
	u32 breg_size;
	u32 pcie_reg_size;
	u32 ecam_size;
	int irq_intx;
	int irq_misc;
	u32 ecam_value;
	u8 last_busno;
	u8 root_busno;
	struct nwl_msi msi;
	struct irq_domain *legacy_irq_domain;
	raw_spinlock_t leg_mask_lock;
};

static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
	return readl(pcie->breg_base + off);
}

static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
	writel(val, pcie->breg_base + off);
}

static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
		return true;
	return false;
}

static bool nwl_phy_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
		return true;
	return false;
}

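/*
 * Poll for the PHY link to come up. With the parameters above this
 * retries every 90-100 ms for up to 10 attempts, i.e. roughly one
 * second, before giving up.
 */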
static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (nwl_phy_link_up(pcie))
			return 0;
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PHY link never came up\n");
	return -ETIMEDOUT;
}

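/*
 * Reject config accesses that cannot reach a device: anything below
 * the root bus while the link is down, and any devfn other than 0 on
 * the root bus itself, since each root port leads to a single device.
 */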
static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	struct nwl_pcie *pcie = bus->sysdata;

	/* Check link before accessing downstream ports */
	if (bus->number != pcie->root_busno) {
		if (!nwl_pcie_link_up(pcie))
			return false;
	}

	/* Only one device down on each root port */
	if (bus->number == pcie->root_busno && devfn > 0)
		return false;

	return true;
}

/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the configuration space needed to be
 *	   accessed.
 */
static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct nwl_pcie *pcie = bus->sysdata;
	int relbus;

	if (!nwl_pcie_valid_device(bus, devfn))
		return NULL;

	relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
			(devfn << ECAM_DEV_LOC_SHIFT);

	return pcie->ecam_base + relbus + where;
}

/* PCIe operations */
static struct pci_ops nwl_pcie_ops = {
	.map_bus = nwl_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};

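/*
 * Miscellaneous interrupt handler: reads the bits latched in
 * MSGF_MISC_STATUS, logs each error or link-management event that is
 * set, then writes the handled bits back to the status register to
 * clear them.
 */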
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
	struct nwl_pcie *pcie = data;
	struct device *dev = pcie->dev;
	u32 misc_stat;

	/* Checking for misc interrupts */
	misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
		    MSGF_MISC_SR_MASKALL;
	if (!misc_stat)
		return IRQ_NONE;

	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
		dev_err(dev, "Received Message FIFO Overflow\n");
	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
		dev_err(dev, "Slave error\n");
	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
		dev_err(dev, "Master error\n");
	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
		dev_err(dev, "In Misc Ingress address translation error\n");
	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
		dev_err(dev, "In Misc Egress address translation error\n");
	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
		dev_err(dev, "Fatal Error in AER Capability\n");
	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
		dev_err(dev, "Non-Fatal Error in AER Capability\n");
	if (misc_stat & MSGF_MISC_SR_CORR_AER)
		dev_err(dev, "Correctable Error in AER Capability\n");
	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
		dev_err(dev, "Unsupported request Detected\n");
	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
		dev_err(dev, "Non-Fatal Error Detected\n");
	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
		dev_err(dev, "Fatal Error Detected\n");
	if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
	if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
		dev_info(dev, "Link Bandwidth Management Status bit set\n");

	/* Clear misc interrupt status */
	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

	return IRQ_HANDLED;
}

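/*
 * Chained handler for the shared INTx (legacy) interrupt: keep
 * re-reading MSGF_LEG_STATUS and dispatching the mapped virq for each
 * pending INTA-INTD bit until the status register reads clear.
 */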
static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);

	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
				MSGF_LEG_SR_MASKALL) != 0) {
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(chip, desc);
}

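/*
 * Drain one 32-bit MSI status register: acknowledge each pending bit
 * by writing it back, then dispatch the virq mapped to that hardware
 * MSI number. Re-reading until zero catches MSIs that arrive while
 * earlier ones are being handled.
 */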
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
	struct nwl_msi *msi;
	unsigned long status;
	u32 bit;
	u32 virq;

	msi = &pcie->msi;

	while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			nwl_bridge_writel(pcie, 1 << bit, status_reg);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}
}

static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
	chained_irq_exit(chip, desc);
}

static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
	chained_irq_exit(chip, desc);
}

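/*
 * MSGF_LEG_MASK is an enable mask: a set bit lets the corresponding
 * INTx through (see the "Enable all legacy interrupts" write in
 * nwl_pcie_bridge_init()), so masking clears the bit and unmasking
 * sets it. The raw spinlock serializes the read-modify-write.
 */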
static void nwl_mask_leg_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct nwl_pcie *pcie;
	unsigned long flags;
	u32 mask;
	u32 val;

	pcie = irq_desc_get_chip_data(desc);
	mask = 1 << data->hwirq;	/* INTx hwirqs are 0-based */
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static void nwl_unmask_leg_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct nwl_pcie *pcie;
	unsigned long flags;
	u32 mask;
	u32 val;

	pcie = irq_desc_get_chip_data(desc);
	mask = 1 << data->hwirq;	/* INTx hwirqs are 0-based */
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static struct irq_chip nwl_leg_irq_chip = {
	.name = "nwl_pcie:legacy",
	.irq_enable = nwl_unmask_leg_irq,
	.irq_disable = nwl_mask_leg_irq,
	.irq_mask = nwl_mask_leg_irq,
	.irq_unmask = nwl_unmask_leg_irq,
};

static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops legacy_domain_ops = {
	.map = nwl_legacy_map,
	.xlate = pci_irqd_intx_xlate,
};

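/*
 * MSI support is split across two IRQ domains: dev_domain hands out
 * hardware MSI numbers from the driver's bitmap, and msi_domain (the
 * generic PCI-MSI layer stacked on top of it) is what endpoint drivers
 * actually allocate from.
 */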
#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
	.name = "nwl_pcie:msi",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static struct msi_domain_info nwl_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip = &nwl_msi_irq_chip,
};
#endif

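/*
 * An MSI is a posted write from the endpoint: the message address is
 * the bridge's physical register base (the same address programmed
 * into I_MSII_BASE_LO/HI in nwl_pcie_enable_msi() below) and the
 * message data carries the hardware MSI number.
 */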
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_pcie_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static int nwl_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip nwl_irq_chip = {
	.name = "Xilinx MSI",
	.irq_compose_msi_msg = nwl_compose_msi_msg,
	.irq_set_affinity = nwl_msi_set_affinity,
};

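/*
 * Allocate nr_irqs consecutive hardware MSI numbers under the bitmap
 * lock. Multi-MSI hands the endpoint a block of vectors that must be
 * aligned to its (power-of-two) size, hence the nr_irqs - 1 alignment
 * mask below.
 */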
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	struct nwl_pcie *pcie = domain->host_data;
	struct nwl_msi *msi = &pcie->msi;
	int bit;
	int i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
					 nr_irqs, nr_irqs - 1);
	if (bit >= INT_PCI_MSI_NR) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	bitmap_set(msi->bitmap, bit, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);
	return 0;
}

static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct nwl_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc = nwl_irq_domain_alloc,
	.free = nwl_irq_domain_free,
};

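/*
 * Build the two-level MSI domain hierarchy described above: the inner
 * dev_domain is created without a DT node of its own, while the PCI
 * MSI domain on top is attached to the controller's fwnode.
 */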
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
	struct device *dev = pcie->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct nwl_msi *msi = &pcie->msi;

	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
						&dev_msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create dev IRQ domain\n");
		return -ENOMEM;
	}
	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &nwl_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create msi IRQ domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}
#endif
	return 0;
}

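/*
 * The INTx domain is anchored on the first child of the controller
 * node, which the devicetree binding describes as the legacy
 * interrupt controller.
 */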
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct device_node *legacy_intc_node;

	legacy_intc_node = of_get_next_child(node, NULL);
	if (!legacy_intc_node) {
		dev_err(dev, "No legacy intc node found\n");
		return -EINVAL;
	}

	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
							PCI_NUM_INTX,
							&legacy_domain_ops,
							pcie);
	if (!pcie->legacy_irq_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&pcie->leg_mask_lock);
	nwl_pcie_init_msi_irq_domain(pcie);
	return 0;
}

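/*
 * Set up MSI reception: allocate the hwirq bitmap, chain both MSI
 * status interrupts, point the ingress MSI window at the bridge's
 * register base, then disable, clear and re-enable the low and high
 * 32-vector groups.
 */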
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nwl_msi *msi = &pcie->msi;
	unsigned long base;
	int ret;
	int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);

	mutex_init(&msi->lock);

	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	/* Get msi_1 IRQ number */
	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (msi->irq_msi1 < 0) {
		dev_err(dev, "failed to get msi1 IRQ\n");
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi1,
					 nwl_pcie_msi_handler_high, pcie);

	/* Get msi_0 IRQ number */
	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (msi->irq_msi0 < 0) {
		dev_err(dev, "failed to get msi0 IRQ\n");
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi0,
					 nwl_pcie_msi_handler_low, pcie);

	/* Check for msii_present bit */
	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
	if (!ret) {
		dev_err(dev, "MSI not present\n");
		ret = -EIO;
		goto err;
	}

	/* Enable MSII */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_ENABLE, I_MSII_CONTROL);

	/* Enable MSII status */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_STATUS_ENABLE, I_MSII_CONTROL);

	/* Set up the ingress MSI address range */
	base = pcie->phys_pcie_reg_base;
	nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

	/*
	 * For high range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
			  MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

	/*
	 * For low range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
			  MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

	return 0;
err:
	kfree(msi->bitmap);
	msi->bitmap = NULL;
	return ret;
}

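/*
 * One-time bridge/ECAM bring-up: program the translation windows, wait
 * for the PHY link, size the bus range from the ECAM control register,
 * and wire up the miscellaneous interrupt before unmasking everything.
 */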
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 breg_val, ecam_val, first_busno = 0;
	int err;

	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
	if (!breg_val) {
		dev_err(dev, "BREG is not present\n");
		return -ENODEV;
	}

	/* Write the physical bridge register base to the breg translation */
	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_HI);

	/* Enable BREG */
	nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
			  E_BREG_CONTROL);

	/* Disable DMA channel registers */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
			  CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);

	/* Enable Ingress subtractive decode translation */
	nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);

	/* Enable msg filtering details */
	nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
			  BRCFG_PCIE_RX_MSG_FILTER);

	err = nwl_wait_for_link(pcie);
	if (err)
		return err;

	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
	if (!ecam_val) {
		dev_err(dev, "ECAM is not present\n");
		return -ENODEV;
	}

	/* Enable ECAM */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
			  E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_HI);

	/* Get bus range */
	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
	pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
	/* Write primary, secondary and subordinate bus numbers */
	ecam_val = first_busno;
	ecam_val |= (first_busno + 1) << 8;
	ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));

	if (nwl_pcie_link_up(pcie))
		dev_info(dev, "Link is UP\n");
	else
		dev_info(dev, "Link is DOWN\n");

	/* Get misc IRQ number */
	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
	if (pcie->irq_misc < 0) {
		dev_err(dev, "failed to get misc IRQ %d\n",
			pcie->irq_misc);
		return -EINVAL;
	}

	err = devm_request_irq(dev, pcie->irq_misc,
			       nwl_pcie_misc_handler, IRQF_SHARED,
			       "nwl_pcie:misc", pcie);
	if (err) {
		dev_err(dev, "failed to register misc IRQ#%d\n",
			pcie->irq_misc);
		return err;
	}

	/* Disable all misc interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Clear pending misc interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
			  MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);

	/* Enable all misc interrupts */
	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Disable all legacy interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Clear pending legacy interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);

	/* Enable all legacy interrupts */
	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Enable the bridge config interrupt */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
			  BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);

	return 0;
}

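/*
 * Map the three register apertures named in the devicetree ("breg",
 * "pcireg" and "cfg"), record their physical bases for the translation
 * setup above, and chain the shared INTx interrupt.
 */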
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
			     struct platform_device *pdev)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	const char *type;

	/* Check for device type */
	type = of_get_property(node, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
	pcie->breg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->breg_base))
		return PTR_ERR(pcie->breg_base);
	pcie->phys_breg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
	pcie->pcireg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->pcireg_base))
		return PTR_ERR(pcie->pcireg_base);
	pcie->phys_pcie_reg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->ecam_base))
		return PTR_ERR(pcie->ecam_base);
	pcie->phys_ecam_base = res->start;

	/* Get intx IRQ number */
	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
	if (pcie->irq_intx < 0) {
		dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
		return pcie->irq_intx;
	}

	irq_set_chained_handler_and_data(pcie->irq_intx,
					 nwl_pcie_leg_handler, pcie);

	return 0;
}

static const struct of_device_id nwl_pcie_of_match[] = {
	{ .compatible = "xlnx,nwl-pcie-2.11", },
	{}
};

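/*
 * Probe: allocate the host bridge, bring up the hardware, gather the
 * bridge windows from the devicetree, create the IRQ domains (and MSI
 * support if configured), then scan and populate the root bus.
 */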
static int nwl_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct nwl_pcie *pcie;
	struct pci_bus *bus;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	int err;
	resource_size_t iobase = 0;
	LIST_HEAD(res);

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENODEV;

	pcie = pci_host_bridge_priv(bridge);

	pcie->dev = dev;
	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

	err = nwl_pcie_parse_dt(pcie, pdev);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	err = nwl_pcie_bridge_init(pcie);
	if (err) {
		dev_err(dev, "HW Initialization failed\n");
		return err;
	}

	err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
	if (err) {
		dev_err(dev, "Getting bridge resources failed\n");
		return err;
	}

	err = devm_request_pci_bus_resources(dev, &res);
	if (err)
		goto error;

	err = nwl_pcie_init_irq_domain(pcie);
	if (err) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		goto error;
	}

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = pcie->root_busno;
	bridge->ops = &nwl_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = nwl_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n",
				err);
			goto error;
		}
	}

	err = pci_scan_root_bus_bridge(bridge);
	if (err)
		goto error;

	bus = bridge->bus;

	pci_assign_unassigned_bus_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return err;
}

static struct platform_driver nwl_pcie_driver = {
	.driver = {
		.name = "nwl-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = nwl_pcie_of_match,
	},
	.probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);