pci-mvebu.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe driver for Marvell Armada 370 and Armada XP SoCs
  4. *
  5. * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/pci.h>
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/gpio.h>
  12. #include <linux/init.h>
  13. #include <linux/mbus.h>
  14. #include <linux/msi.h>
  15. #include <linux/slab.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/of_address.h>
  18. #include <linux/of_irq.h>
  19. #include <linux/of_gpio.h>
  20. #include <linux/of_pci.h>
  21. #include <linux/of_platform.h>
/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
/* BARs 0-2: low/high halves, 8 bytes apart per BAR */
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
/* Offset of the hardware PCIe capability block inside the unit */
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_HEADER_LOG_4_OFF	0x0128
/* BAR control registers exist only for BARs 1 and 2, hence (n - 1) */
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
/* Address decode windows 0-4: one 16-byte register group per window */
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
/* Window 5 has its own register group */
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
/* Indirect configuration access: address register + data register */
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_MASK_OFF		0x1910
#define  PCIE_MASK_ENABLE_INTS		0x0f000000
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS			0xff00
#define  PCIE_STAT_DEV			0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)
/*
 * Layout of the emulated PCI Express capability within the software
 * PCI-to-PCI bridge configuration space: the capability block starts
 * immediately after the standard type-1 header (PCISWCAP), and each
 * register sits at PCISWCAP plus its usual PCI_EXP_* offset.
 */
enum {
	PCISWCAP = PCI_BRIDGE_CONTROL + 2,
	PCISWCAP_EXP_LIST_ID	= PCISWCAP + PCI_CAP_LIST_ID,
	PCISWCAP_EXP_DEVCAP	= PCISWCAP + PCI_EXP_DEVCAP,
	PCISWCAP_EXP_DEVCTL	= PCISWCAP + PCI_EXP_DEVCTL,
	PCISWCAP_EXP_LNKCAP	= PCISWCAP + PCI_EXP_LNKCAP,
	PCISWCAP_EXP_LNKCTL	= PCISWCAP + PCI_EXP_LNKCTL,
	PCISWCAP_EXP_SLTCAP	= PCISWCAP + PCI_EXP_SLTCAP,
	PCISWCAP_EXP_SLTCTL	= PCISWCAP + PCI_EXP_SLTCTL,
	PCISWCAP_EXP_RTCTL	= PCISWCAP + PCI_EXP_RTCTL,
	PCISWCAP_EXP_RTSTA	= PCISWCAP + PCI_EXP_RTSTA,
	PCISWCAP_EXP_DEVCAP2	= PCISWCAP + PCI_EXP_DEVCAP2,
	PCISWCAP_EXP_DEVCTL2	= PCISWCAP + PCI_EXP_DEVCTL2,
	PCISWCAP_EXP_LNKCAP2	= PCISWCAP + PCI_EXP_LNKCAP2,
	PCISWCAP_EXP_LNKCTL2	= PCISWCAP + PCI_EXP_LNKCTL2,
	PCISWCAP_EXP_SLTCAP2	= PCISWCAP + PCI_EXP_SLTCAP2,
	PCISWCAP_EXP_SLTCTL2	= PCISWCAP + PCI_EXP_SLTCTL2,
};
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
	/* Standard type-1 header fields, kept in host byte order. */
	u16 vendor;
	u16 device;
	u16 command;
	u16 status;
	u16 class;
	u8 interface;
	u8 revision;
	u8 bist;
	u8 header_type;
	u8 latency_timer;
	u8 cache_line_size;
	u32 bar[2];
	u8 primary_bus;
	u8 secondary_bus;
	u8 subordinate_bus;
	u8 secondary_latency_timer;
	u8 iobase;			/* low byte; always has PCI_IO_RANGE_TYPE_32 set */
	u8 iolimit;			/* low byte; always has PCI_IO_RANGE_TYPE_32 set */
	u16 secondary_status;
	u16 membase;
	u16 memlimit;
	u16 iobaseupper;		/* upper 16 bits of the 32-bit I/O base */
	u16 iolimitupper;		/* upper 16 bits of the 32-bit I/O limit */
	u32 romaddr;
	u8 intline;
	u8 intpin;
	u16 bridgectrl;

	/* PCI express capability */
	u32 pcie_sltcap;
	u16 pcie_devctl;		/* shadow of the RC-forbidden error-enable bits */
	u16 pcie_rtctl;
};
struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array of nports entries */
	struct msi_controller *msi;
	struct resource io;		/* I/O aperture; io.start is used as the CPU base */
	struct resource realio;		/* I/O resource registered with the PCI core */
	struct resource mem;		/* memory aperture */
	struct resource busn;		/* bus number range */
	int nports;			/* number of entries in ports[] */
};
/* One MBus address-decode window currently programmed for a port. */
struct mvebu_pcie_window {
	phys_addr_t base;	/* CPU physical base address */
	phys_addr_t remap;	/* remap address, or MVEBU_MBUS_NO_REMAP */
	size_t size;		/* window size in bytes; 0 means disabled */
};
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;		/* mapped PCIe unit registers */
	u32 port;			/* hardware port number */
	u32 lane;			/* hardware lane number */
	int devfn;			/* devfn of this port on bus 0 */
	unsigned int mem_target;	/* MBus target for memory windows */
	unsigned int mem_attr;		/* MBus attribute for memory windows */
	unsigned int io_target;		/* MBus target for I/O windows, -1 if none */
	unsigned int io_attr;		/* MBus attribute for I/O windows, -1 if none */
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct mvebu_sw_pci_bridge bridge;	/* emulated PCI-to-PCI bridge state */
	struct device_node *dn;
	struct mvebu_pcie *pcie;	/* back-pointer to the controller */
	struct mvebu_pcie_window memwin;	/* currently programmed memory window */
	struct mvebu_pcie_window iowin;		/* currently programmed I/O window */
	u32 saved_pcie_stat;		/* PCIE_STAT_OFF saved across suspend (presumably) — TODO confirm */
};
  151. static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
  152. {
  153. writel(val, port->base + reg);
  154. }
  155. static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
  156. {
  157. return readl(port->base + reg);
  158. }
  159. static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
  160. {
  161. return port->io_target != -1 && port->io_attr != -1;
  162. }
  163. static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
  164. {
  165. return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
  166. }
  167. static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
  168. {
  169. u32 stat;
  170. stat = mvebu_readl(port, PCIE_STAT_OFF);
  171. stat &= ~PCIE_STAT_BUS;
  172. stat |= nr << 8;
  173. mvebu_writel(port, stat, PCIE_STAT_OFF);
  174. }
  175. static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
  176. {
  177. u32 stat;
  178. stat = mvebu_readl(port, PCIE_STAT_OFF);
  179. stat &= ~PCIE_STAT_DEV;
  180. stat |= nr << 16;
  181. mvebu_writel(port, stat, PCIE_STAT_OFF);
  182. }
/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		/* Control word: size mask | attribute | target | enable bit */
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	/* BAR control: size mask plus enable bit */
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));
}
  231. static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
  232. {
  233. u32 cmd, mask;
  234. /* Point PCIe unit MBUS decode windows to DRAM space. */
  235. mvebu_pcie_setup_wins(port);
  236. /* Master + slave enable. */
  237. cmd = mvebu_readl(port, PCIE_CMD_OFF);
  238. cmd |= PCI_COMMAND_IO;
  239. cmd |= PCI_COMMAND_MEMORY;
  240. cmd |= PCI_COMMAND_MASTER;
  241. mvebu_writel(port, cmd, PCIE_CMD_OFF);
  242. /* Enable interrupt lines A-D. */
  243. mask = mvebu_readl(port, PCIE_MASK_OFF);
  244. mask |= PCIE_MASK_ENABLE_INTS;
  245. mvebu_writel(port, mask, PCIE_MASK_OFF);
  246. }
  247. static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
  248. struct pci_bus *bus,
  249. u32 devfn, int where, int size, u32 *val)
  250. {
  251. void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
  252. mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
  253. PCIE_CONF_ADDR_OFF);
  254. switch (size) {
  255. case 1:
  256. *val = readb_relaxed(conf_data + (where & 3));
  257. break;
  258. case 2:
  259. *val = readw_relaxed(conf_data + (where & 2));
  260. break;
  261. case 4:
  262. *val = readl_relaxed(conf_data);
  263. break;
  264. }
  265. return PCIBIOS_SUCCESSFUL;
  266. }
/*
 * Write @size bytes of @val to the configuration space of the device
 * behind @port, via the indirect PCIE_CONF_ADDR/PCIE_CONF_DATA
 * register pair.  Returns a PCIBIOS_* status code.
 */
static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register in the address register
	 * before touching the data register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	/* Sub-word writes go to the matching byte lane of the data port. */
	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
  289. /*
  290. * Remove windows, starting from the largest ones to the smallest
  291. * ones.
  292. */
  293. static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
  294. phys_addr_t base, size_t size)
  295. {
  296. while (size) {
  297. size_t sz = 1 << (fls(size) - 1);
  298. mvebu_mbus_del_window(base, sz);
  299. base += sz;
  300. size -= sz;
  301. }
  302. }
/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 */
static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				   unsigned int target, unsigned int attribute,
				   phys_addr_t base, size_t size,
				   phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		/* Largest power-of-two chunk still fitting in 'size'. */
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Roll back every window created so far. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap address in lockstep with the base. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}
}
/*
 * Reconfigure an MBus window for @port: tear down the currently
 * programmed window (if any) and create the desired one.  *@cur
 * tracks what is actually programmed and is updated on success.
 * A desired size of 0 simply disables the window.
 */
static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  const struct mvebu_pcie_window *desired,
				  struct mvebu_pcie_window *cur)
{
	/* Nothing to do when the window is already as requested. */
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return;

	mvebu_pcie_add_windows(port, target, attribute, desired->base,
			       desired->size, desired->remap);
	*cur = *desired;
}
/*
 * React to a change of the emulated bridge I/O base/limit registers
 * (or of PCI_COMMAND_IO) by reprogramming the port's I/O MBus window.
 */
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	/* desired stays all-zero when the window must be disabled. */
	struct mvebu_pcie_window desired = {};

	/* Are the new iobase/iolimit values invalid? */
	if (port->bridge.iolimit < port->bridge.iobase ||
	    port->bridge.iolimitupper < port->bridge.iobaseupper ||
	    !(port->bridge.command & PCI_COMMAND_IO)) {
		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
				      &desired, &port->iowin);
		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	/* Bus address: bits 15:12 from iobase, bits 31:16 from iobaseupper. */
	desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
			(port->bridge.iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* The limit's low 12 bits are implicitly all-ones (0xFFF). */
	desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
			 (port->bridge.iolimitupper << 16)) -
			desired.remap) +
		       1;

	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
			      &port->iowin);
}
/*
 * React to a change of the emulated bridge memory base/limit
 * registers (or of PCI_COMMAND_MEMORY) by reprogramming the port's
 * memory MBus window.
 */
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	/* Memory windows are identity-mapped: no MBus remapping. */
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};

	/* Are the new membase/memlimit values invalid? */
	if (port->bridge.memlimit < port->bridge.membase ||
	    !(port->bridge.command & PCI_COMMAND_MEMORY)) {
		/* Yes: disable the window by requesting size 0. */
		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
				      &desired, &port->memwin);
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((port->bridge.membase & 0xFFF0) << 16);
	/* The limit's low 20 bits are implicitly all-ones (0xFFFFF). */
	desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
			      &port->memwin);
}
  415. /*
  416. * Initialize the configuration space of the PCI-to-PCI bridge
  417. * associated with the given PCIe interface.
  418. */
  419. static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
  420. {
  421. struct mvebu_sw_pci_bridge *bridge = &port->bridge;
  422. memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
  423. bridge->class = PCI_CLASS_BRIDGE_PCI;
  424. bridge->vendor = PCI_VENDOR_ID_MARVELL;
  425. bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
  426. bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
  427. bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
  428. bridge->cache_line_size = 0x10;
  429. /* We support 32 bits I/O addressing */
  430. bridge->iobase = PCI_IO_RANGE_TYPE_32;
  431. bridge->iolimit = PCI_IO_RANGE_TYPE_32;
  432. /* Add capabilities */
  433. bridge->status = PCI_STATUS_CAP_LIST;
  434. }
/*
 * Read the configuration space of the PCI-to-PCI bridge associated to
 * the given PCIe interface.
 */
static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
				    unsigned int where, int size, u32 *value)
{
	struct mvebu_sw_pci_bridge *bridge = &port->bridge;

	/* Registers are emulated in aligned 32-bit words; assemble the
	 * full word first, then extract the requested sub-word below. */
	switch (where & ~3) {
	case PCI_VENDOR_ID:
		*value = bridge->device << 16 | bridge->vendor;
		break;

	case PCI_COMMAND:
		*value = bridge->command | bridge->status << 16;
		break;

	case PCI_CLASS_REVISION:
		*value = bridge->class << 16 | bridge->interface << 8 |
			 bridge->revision;
		break;

	case PCI_CACHE_LINE_SIZE:
		*value = bridge->bist << 24 | bridge->header_type << 16 |
			 bridge->latency_timer << 8 | bridge->cache_line_size;
		break;

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
		*value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
		break;

	case PCI_PRIMARY_BUS:
		*value = (bridge->secondary_latency_timer << 24 |
			  bridge->subordinate_bus << 16 |
			  bridge->secondary_bus << 8 |
			  bridge->primary_bus);
		break;

	case PCI_IO_BASE:
		/* Hide the I/O base/limit when no I/O window is wired up. */
		if (!mvebu_has_ioport(port))
			*value = bridge->secondary_status << 16;
		else
			*value = (bridge->secondary_status << 16 |
				  bridge->iolimit << 8 |
				  bridge->iobase);
		break;

	case PCI_MEMORY_BASE:
		*value = (bridge->memlimit << 16 | bridge->membase);
		break;

	case PCI_PREF_MEMORY_BASE:
		/* No prefetchable window is emulated. */
		*value = 0;
		break;

	case PCI_IO_BASE_UPPER16:
		*value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
		break;

	case PCI_CAPABILITY_LIST:
		*value = PCISWCAP;
		break;

	case PCI_ROM_ADDRESS1:
		*value = 0;
		break;

	case PCI_INTERRUPT_LINE:
		/* LINE PIN MIN_GNT MAX_LAT */
		*value = 0;
		break;

	case PCISWCAP_EXP_LIST_ID:
		/* Set PCIe v2, root port, slot support */
		*value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
			  PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
		break;

	case PCISWCAP_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCISWCAP_EXP_DEVCTL:
		/*
		 * The error-reporting enables are kept in the emulated
		 * state (pcie_devctl), not in the hardware register
		 * where they must stay clear in root-complex mode;
		 * merge them back in here.
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
			 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		*value |= bridge->pcie_devctl;
		break;

	case PCISWCAP_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCISWCAP_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCISWCAP_EXP_SLTCAP:
		*value = bridge->pcie_sltcap;
		break;

	case PCISWCAP_EXP_SLTCTL:
		/* Always report the slot as occupied (presence detect set). */
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCISWCAP_EXP_RTCTL:
		*value = bridge->pcie_rtctl;
		break;

	case PCISWCAP_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	/* PCIe requires the v2 fields to be hard-wired to zero */
	case PCISWCAP_EXP_DEVCAP2:
	case PCISWCAP_EXP_DEVCTL2:
	case PCISWCAP_EXP_LNKCAP2:
	case PCISWCAP_EXP_LNKCTL2:
	case PCISWCAP_EXP_SLTCAP2:
	case PCISWCAP_EXP_SLTCTL2:
	default:
		/*
		 * PCI defines configuration read accesses to reserved or
		 * unimplemented registers to read as zero and complete
		 * normally.
		 */
		*value = 0;
		return PCIBIOS_SUCCESSFUL;
	}

	/* Extract the requested sub-word from the assembled 32-bit word. */
	if (size == 2)
		*value = (*value >> (8 * (where & 3))) & 0xffff;
	else if (size == 1)
		*value = (*value >> (8 * (where & 3))) & 0xff;

	return PCIBIOS_SUCCESSFUL;
}
/* Write to the PCI-to-PCI bridge configuration space */
static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
				     unsigned int where, int size, u32 value)
{
	struct mvebu_sw_pci_bridge *bridge = &port->bridge;
	u32 mask, reg;
	int err;

	/*
	 * Turn sub-word accesses into a read-modify-write of the
	 * aligned 32-bit register: 'mask' keeps the bytes that are
	 * NOT being written.
	 */
	if (size == 4)
		mask = 0x0;
	else if (size == 2)
		mask = ~(0xffff << ((where & 3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
	if (err)
		return err;

	value = (reg & mask) | value << ((where & 3) * 8);

	switch (where & ~3) {
	case PCI_COMMAND:
	{
		u32 old = bridge->command;

		/* Never allow I/O decoding when no I/O window exists. */
		if (!mvebu_has_ioport(port))
			value &= ~PCI_COMMAND_IO;

		bridge->command = value & 0xffff;
		/* Reprogram the MBus windows when a decode enable toggles. */
		if ((old ^ bridge->command) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);
		break;
	}

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
		bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
		break;

	case PCI_IO_BASE:
		/*
		 * We also keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
		bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		bridge->membase = value & 0xffff;
		bridge->memlimit = value >> 16;
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		bridge->iobaseupper = value & 0xffff;
		bridge->iolimitupper = value >> 16;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		bridge->primary_bus = value & 0xff;
		bridge->secondary_bus = (value >> 8) & 0xff;
		bridge->subordinate_bus = (value >> 16) & 0xff;
		bridge->secondary_latency_timer = (value >> 24) & 0xff;
		/* Keep the hardware's local bus number in sync. */
		mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
		break;

	case PCISWCAP_EXP_DEVCTL:
		/*
		 * Armada370 data says these bits must always
		 * be zero when in root complex mode.
		 */
		value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		/*
		 * If the mask is 0xffff0000, then we only want to write
		 * the device control register, rather than clearing the
		 * RW1C bits in the device status register. Mask out the
		 * status register bits.
		 */
		if (mask == 0xffff0000)
			value &= 0xffff;

		mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCISWCAP_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero. Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		/*
		 * If the mask is 0xffff0000, then we only want to write
		 * the link control register, rather than clearing the
		 * RW1C bits in the link status register. Mask out the
		 * RW1C status register bits.
		 */
		if (mask == 0xffff0000)
			value &= ~((PCI_EXP_LNKSTA_LABS |
				    PCI_EXP_LNKSTA_LBMS) << 16);

		mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCISWCAP_EXP_RTSTA:
		mvebu_writel(port, value, PCIE_RC_RTSTA);
		break;

	default:
		/* Writes to unhandled registers are silently ignored. */
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
  659. static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
  660. {
  661. return sys->private_data;
  662. }
  663. static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
  664. struct pci_bus *bus,
  665. int devfn)
  666. {
  667. int i;
  668. for (i = 0; i < pcie->nports; i++) {
  669. struct mvebu_pcie_port *port = &pcie->ports[i];
  670. if (bus->number == 0 && port->devfn == devfn)
  671. return port;
  672. if (bus->number != 0 &&
  673. bus->number >= port->bridge.secondary_bus &&
  674. bus->number <= port->bridge.subordinate_bus)
  675. return port;
  676. }
  677. return NULL;
  678. }
  679. /* PCI configuration space write function */
  680. static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  681. int where, int size, u32 val)
  682. {
  683. struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
  684. struct mvebu_pcie_port *port;
  685. int ret;
  686. port = mvebu_pcie_find_port(pcie, bus, devfn);
  687. if (!port)
  688. return PCIBIOS_DEVICE_NOT_FOUND;
  689. /* Access the emulated PCI-to-PCI bridge */
  690. if (bus->number == 0)
  691. return mvebu_sw_pci_bridge_write(port, where, size, val);
  692. if (!mvebu_pcie_link_up(port))
  693. return PCIBIOS_DEVICE_NOT_FOUND;
  694. /* Access the real PCIe interface */
  695. ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
  696. where, size, val);
  697. return ret;
  698. }
  699. /* PCI configuration space read function */
  700. static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  701. int size, u32 *val)
  702. {
  703. struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
  704. struct mvebu_pcie_port *port;
  705. int ret;
  706. port = mvebu_pcie_find_port(pcie, bus, devfn);
  707. if (!port) {
  708. *val = 0xffffffff;
  709. return PCIBIOS_DEVICE_NOT_FOUND;
  710. }
  711. /* Access the emulated PCI-to-PCI bridge */
  712. if (bus->number == 0)
  713. return mvebu_sw_pci_bridge_read(port, where, size, val);
  714. if (!mvebu_pcie_link_up(port)) {
  715. *val = 0xffffffff;
  716. return PCIBIOS_DEVICE_NOT_FOUND;
  717. }
  718. /* Access the real PCIe interface */
  719. ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
  720. where, size, val);
  721. return ret;
  722. }
/* Config-space accessors handed to the PCI core. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
  727. static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
  728. {
  729. struct mvebu_pcie *pcie = sys_to_pcie(sys);
  730. int err, i;
  731. pcie->mem.name = "PCI MEM";
  732. pcie->realio.name = "PCI I/O";
  733. if (resource_size(&pcie->realio) != 0)
  734. pci_add_resource_offset(&sys->resources, &pcie->realio,
  735. sys->io_offset);
  736. pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
  737. pci_add_resource(&sys->resources, &pcie->busn);
  738. err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
  739. if (err)
  740. return 0;
  741. for (i = 0; i < pcie->nports; i++) {
  742. struct mvebu_pcie_port *port = &pcie->ports[i];
  743. if (!port->base)
  744. continue;
  745. mvebu_pcie_setup_hw(port);
  746. }
  747. return 1;
  748. }
  749. static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
  750. const struct resource *res,
  751. resource_size_t start,
  752. resource_size_t size,
  753. resource_size_t align)
  754. {
  755. if (dev->bus->number != 0)
  756. return start;
  757. /*
  758. * On the PCI-to-PCI bridge side, the I/O windows must have at
  759. * least a 64 KB size and the memory windows must have at
  760. * least a 1 MB size. Moreover, MBus windows need to have a
  761. * base address aligned on their size, and their size must be
  762. * a power of two. This means that if the BAR doesn't have a
  763. * power of two size, several MBus windows will actually be
  764. * created. We need to ensure that the biggest MBus window
  765. * (which will be the first one) is aligned on its size, which
  766. * explains the rounddown_pow_of_two() being done here.
  767. */
  768. if (res->flags & IORESOURCE_IO)
  769. return round_up(start, max_t(resource_size_t, SZ_64K,
  770. rounddown_pow_of_two(size)));
  771. else if (res->flags & IORESOURCE_MEM)
  772. return round_up(start, max_t(resource_size_t, SZ_1M,
  773. rounddown_pow_of_two(size)));
  774. else
  775. return start;
  776. }
  777. static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
  778. {
  779. struct hw_pci hw;
  780. memset(&hw, 0, sizeof(hw));
  781. #ifdef CONFIG_PCI_MSI
  782. hw.msi_ctrl = pcie->msi;
  783. #endif
  784. hw.nr_controllers = 1;
  785. hw.private_data = (void **)&pcie;
  786. hw.setup = mvebu_pcie_setup;
  787. hw.map_irq = of_irq_parse_and_map_pci;
  788. hw.ops = &mvebu_pcie_ops;
  789. hw.align_resource = mvebu_pcie_align_resource;
  790. pci_common_init_dev(&pcie->pdev->dev, &hw);
  791. }
  792. /*
  793. * Looks up the list of register addresses encoded into the reg =
  794. * <...> property for one that matches the given port/lane. Once
  795. * found, maps it.
  796. */
  797. static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
  798. struct device_node *np,
  799. struct mvebu_pcie_port *port)
  800. {
  801. struct resource regs;
  802. int ret = 0;
  803. ret = of_address_to_resource(np, 0, &regs);
  804. if (ret)
  805. return ERR_PTR(ret);
  806. return devm_ioremap_resource(&pdev->dev, &regs);
  807. }
/* Window type is encoded in bits 25:24 of a DT ranges flags cell. */
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
#define DT_TYPE_IO 0x1
#define DT_TYPE_MEM32 0x2
/* MBus target ID / attribute live in bits 63:56 / 55:48 of the CPU address. */
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
/*
 * Walk the controller node's "ranges" property looking for the entry
 * that describes a window of @type (IORESOURCE_IO or IORESOURCE_MEM)
 * for the slot of @devfn, and extract the MBus target/attribute from
 * its CPU address.  Returns 0 on success, -EINVAL when the node has no
 * "ranges" property, -ENOENT when no entry matches.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* Child address: 3 cells (flags + 64-bit address); size: 2 cells. */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	/* Default to "no window" until a matching range is found. */
	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		/* NOTE(review): assumes the slot is the second child-address
		 * cell of each range entry — confirm against the binding. */
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;	/* other space codes are irrelevant here */

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
  848. #ifdef CONFIG_PM_SLEEP
  849. static int mvebu_pcie_suspend(struct device *dev)
  850. {
  851. struct mvebu_pcie *pcie;
  852. int i;
  853. pcie = dev_get_drvdata(dev);
  854. for (i = 0; i < pcie->nports; i++) {
  855. struct mvebu_pcie_port *port = pcie->ports + i;
  856. port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
  857. }
  858. return 0;
  859. }
  860. static int mvebu_pcie_resume(struct device *dev)
  861. {
  862. struct mvebu_pcie *pcie;
  863. int i;
  864. pcie = dev_get_drvdata(dev);
  865. for (i = 0; i < pcie->nports; i++) {
  866. struct mvebu_pcie_port *port = pcie->ports + i;
  867. mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
  868. mvebu_pcie_setup_hw(port);
  869. }
  870. return 0;
  871. }
  872. #endif
  873. static void mvebu_pcie_port_clk_put(void *data)
  874. {
  875. struct mvebu_pcie_port *port = data;
  876. clk_put(port->clk);
  877. }
/*
 * Parse one port child node of the controller.
 *
 * Returns 1 when the port was fully set up, 0 when the port should be
 * silently skipped (missing or unusable description — the "skip" label
 * also frees the devm strings allocated so far), or a negative errno on
 * fatal errors (allocation failure or probe deferral).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* The lane defaults to 0 when the property is absent. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		/* A memory window is mandatory; without one the port is useless. */
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* The I/O window is optional; -1 marks "no I/O window" for this port. */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		/* Propagate deferral so probe is retried later. */
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		/* Request the GPIO with PERST# asserted (card held in reset). */
		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		/* NOTE(review): a clock EPROBE_DEFER is treated as a skip
		 * here rather than deferred — confirm this is intentional. */
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Ensure the clock reference is dropped on driver teardown. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
  966. /*
  967. * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
  968. * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
  969. * of the PCI Express Card Electromechanical Specification, 1.1.
  970. */
  971. static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
  972. {
  973. int ret;
  974. ret = clk_prepare_enable(port->clk);
  975. if (ret < 0)
  976. return ret;
  977. if (port->reset_gpio) {
  978. u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
  979. of_property_read_u32(port->dn, "reset-delay-us",
  980. &reset_udelay);
  981. udelay(100);
  982. gpiod_set_value_cansleep(port->reset_gpio, 0);
  983. msleep(reset_udelay / 1000);
  984. }
  985. return 0;
  986. }
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	/* Assert PERST# first (no-op when reset_gpio is NULL), then gate
	 * the port clock. */
	gpiod_set_value_cansleep(port->reset_gpio, 1);
	clk_disable_unprepare(port->clk);
}
  996. static int mvebu_pcie_probe(struct platform_device *pdev)
  997. {
  998. struct device *dev = &pdev->dev;
  999. struct mvebu_pcie *pcie;
  1000. struct device_node *np = dev->of_node;
  1001. struct device_node *child;
  1002. int num, i, ret;
  1003. pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
  1004. if (!pcie)
  1005. return -ENOMEM;
  1006. pcie->pdev = pdev;
  1007. platform_set_drvdata(pdev, pcie);
  1008. /* Get the PCIe memory and I/O aperture */
  1009. mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
  1010. if (resource_size(&pcie->mem) == 0) {
  1011. dev_err(dev, "invalid memory aperture size\n");
  1012. return -EINVAL;
  1013. }
  1014. mvebu_mbus_get_pcie_io_aperture(&pcie->io);
  1015. if (resource_size(&pcie->io) != 0) {
  1016. pcie->realio.flags = pcie->io.flags;
  1017. pcie->realio.start = PCIBIOS_MIN_IO;
  1018. pcie->realio.end = min_t(resource_size_t,
  1019. IO_SPACE_LIMIT,
  1020. resource_size(&pcie->io));
  1021. } else
  1022. pcie->realio = pcie->io;
  1023. /* Get the bus range */
  1024. ret = of_pci_parse_bus_range(np, &pcie->busn);
  1025. if (ret) {
  1026. dev_err(dev, "failed to parse bus-range property: %d\n", ret);
  1027. return ret;
  1028. }
  1029. num = of_get_available_child_count(np);
  1030. pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
  1031. if (!pcie->ports)
  1032. return -ENOMEM;
  1033. i = 0;
  1034. for_each_available_child_of_node(np, child) {
  1035. struct mvebu_pcie_port *port = &pcie->ports[i];
  1036. ret = mvebu_pcie_parse_port(pcie, port, child);
  1037. if (ret < 0) {
  1038. of_node_put(child);
  1039. return ret;
  1040. } else if (ret == 0) {
  1041. continue;
  1042. }
  1043. port->dn = child;
  1044. i++;
  1045. }
  1046. pcie->nports = i;
  1047. for (i = 0; i < pcie->nports; i++) {
  1048. struct mvebu_pcie_port *port = &pcie->ports[i];
  1049. child = port->dn;
  1050. if (!child)
  1051. continue;
  1052. ret = mvebu_pcie_powerup(port);
  1053. if (ret < 0)
  1054. continue;
  1055. port->base = mvebu_pcie_map_registers(pdev, child, port);
  1056. if (IS_ERR(port->base)) {
  1057. dev_err(dev, "%s: cannot map registers\n", port->name);
  1058. port->base = NULL;
  1059. mvebu_pcie_powerdown(port);
  1060. continue;
  1061. }
  1062. mvebu_pcie_set_local_dev_nr(port, 1);
  1063. mvebu_sw_pci_bridge_init(port);
  1064. }
  1065. pcie->nports = i;
  1066. for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
  1067. pci_ioremap_io(i, pcie->io.start + i);
  1068. mvebu_pcie_enable(pcie);
  1069. platform_set_drvdata(pdev, pcie);
  1070. return 0;
  1071. }
/* Device-tree compatibles handled by this driver. */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
/* Suspend/resume run with IRQs disabled (noirq phase) so the register
 * save/restore cannot race with config accesses. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
/* Platform driver glue; built-in only (no module unload path). */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		/* driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
};
builtin_platform_driver(mvebu_pcie_driver);