pci_gx.c

/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>

#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>
#include <gxio/trio.h>
#include <gxio/iorpc_trio.h>
#include <hv/drv_trio_intf.h>

#include <arch/sim.h>

/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */

#define DEBUG_PCI_CFG	0

#if DEBUG_PCI_CFG
#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#else
#define TRACE_CFG_WR(...)
#define TRACE_CFG_RD(...)
#endif
static int pci_probe = 1;

/* Information on the PCIe RC ports configuration. */
static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/*
 * On some platforms with one or more Gx endpoint ports, we need to
 * delay the PCIe RC port probe for a few seconds to work around
 * a HW PCIe link-training bug. The exact delay is specified with
 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
 * where T is the TRIO instance number, P is the port number and S is
 * the delay in seconds. If the argument is specified, but the delay is
 * not provided, the value will be DEFAULT_RC_DELAY.
 */
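/*
 * For example (values purely illustrative): booting with
 * "pcie_rc_delay=0,2,15" delays the probe of port 2 on TRIO 0 by 15
 * seconds, while "pcie_rc_delay=0,2" delays it by DEFAULT_RC_DELAY
 * seconds.
 */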
static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/* Default number of seconds that the PCIe RC port probe can be delayed. */
#define DEFAULT_RC_DELAY	10

/* The PCI I/O space size in each PCI domain. */
#define IO_SPACE_SIZE		0x10000

/* Provide shorter versions of some very long constant names. */
#define AUTO_CONFIG_RC	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
#define AUTO_CONFIG_RC_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
#define AUTO_CONFIG_EP	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
#define AUTO_CONFIG_EP_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1

/* Array of the PCIe ports configuration info obtained from the BIB. */
struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];

/* Number of configured TRIO instances. */
int num_trio_shims;

/* All drivers share the TRIO contexts defined here. */
gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
/* Array of PCIe RC controllers. */
struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
int num_rc_controllers;
static struct pci_ops tile_cfg_ops;

/* Mask of CPUs that should receive PCIe interrupts. */
static struct cpumask intr_cpus_map;
/*
 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
 * For now, we simply send interrupts to non-dataplane CPUs.
 * We may implement methods to allow the user to specify the target CPUs,
 * e.g. via boot arguments.
 */
static int tile_irq_cpu(int irq)
{
	unsigned int count;
	int i = 0;
	int cpu;

	count = cpumask_weight(&intr_cpus_map);
	if (unlikely(count == 0)) {
		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
		return irq % (smp_height * smp_width);
	}

	count = irq % count;
	for_each_cpu(cpu, &intr_cpus_map) {
		if (i++ == count)
			break;
	}
	return cpu;
}
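/*
 * Illustrative walk-through of the selection above: with four CPUs in
 * intr_cpus_map and irq == 6, count becomes 6 % 4 == 2, and the loop
 * stops on the third CPU in the mask, which is returned.
 */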
/* Open a file descriptor to the TRIO shim. */
static int tile_pcie_open(int trio_index)
{
	gxio_trio_context_t *context = &trio_contexts[trio_index];
	int ret;
	int mac;

	/* This opens a file descriptor to the TRIO shim. */
	ret = gxio_trio_init(context, trio_index);
	if (ret < 0)
		goto gxio_trio_init_failure;

	/* Allocate an ASID for the kernel. */
	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
		       trio_index);
		goto asid_alloc_failure;
	}
	context->asid = ret;

#ifdef USE_SHARED_PCIE_CONFIG_REGION
	/*
	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
	 * This shouldn't fail since the kernel is supposed to be the first
	 * client of the TRIO's PIO regions.
	 */
	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
		       trio_index);
		goto pio_alloc_failure;
	}
	context->pio_cfg_index = ret;

	/*
	 * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
	 */
	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
					    0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
	if (ret < 0) {
		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
		       trio_index);
		goto pio_alloc_failure;
	}
#endif

	/* Get the properties of the PCIe ports on this TRIO instance. */
	ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
	if (ret < 0) {
		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		goto get_port_property_failure;
	}

	context->mmio_base_mac =
		iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
	if (context->mmio_base_mac == NULL) {
		pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		ret = -ENOMEM;
		goto trio_mmio_mapping_failure;
	}

	/* Check the port strap state which will override the BIB setting. */
	for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
		unsigned int reg_offset;

		/* Ignore ports that are not specified in the BIB. */
		if (!pcie_ports[trio_index].ports[mac].allow_rc &&
		    !pcie_ports[trio_index].ports[mac].allow_ep)
			continue;
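
		/*
		 * A sketch of the MMIO encoding used throughout this file:
		 * the offset below packs the register number into the REG
		 * field, the interface type (the MAC interface here) into
		 * the INTFC field, and the MAC number into the MAC_SEL
		 * field of a TRIO_CFG_REGION_ADDR.
		 */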
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_CONFIG <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_config.word =
			__gxio_mmio_read(context->mmio_base_mac + reg_offset);

		if (port_config.strap_state != AUTO_CONFIG_RC &&
		    port_config.strap_state != AUTO_CONFIG_RC_G1) {
			/*
			 * If this is really intended to be an EP port, record
			 * it so that the endpoint driver will know about it.
			 */
			if (port_config.strap_state == AUTO_CONFIG_EP ||
			    port_config.strap_state == AUTO_CONFIG_EP_G1)
				pcie_ports[trio_index].ports[mac].allow_ep = 1;
		}
	}

	return ret;

trio_mmio_mapping_failure:
get_port_property_failure:
asid_alloc_failure:
#ifdef USE_SHARED_PCIE_CONFIG_REGION
pio_alloc_failure:
#endif
	hv_dev_close(context->fd);
gxio_trio_init_failure:
	context->fd = -1;

	return ret;
}

static int __init tile_trio_init(void)
{
	int i;

	/* We loop over all the TRIO shims. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		if (tile_pcie_open(i) < 0)
			continue;
		num_trio_shims++;
	}

	return 0;
}
postcore_initcall(tile_trio_init);

static void tilegx_legacy_irq_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_mask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
}

static struct irq_chip tilegx_legacy_irq_chip = {
	.name = "tilegx_legacy_irq",
	.irq_ack = tilegx_legacy_irq_ack,
	.irq_mask = tilegx_legacy_irq_mask,
	.irq_unmask = tilegx_legacy_irq_unmask,

	/* TBD: support set_affinity. */
};
/*
 * This is a wrapper around the kernel's level-triggered interrupt
 * handler, handle_level_irq(), for PCI legacy interrupts. The TRIO
 * is configured such that only INTx Assert interrupts are proxied
 * to Linux; this wrapper calls handle_level_irq() and then clears the
 * MAC INTx Assert status bit associated with this interrupt.
 */
static void trio_handle_level_irq(struct irq_desc *desc)
{
	struct pci_controller *controller = irq_desc_get_handler_data(desc);
	gxio_trio_context_t *trio_context = controller->trio;
	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
	int mac = controller->mac;
	unsigned int reg_offset;
	uint64_t level_mask;

	handle_level_irq(desc);

	/*
	 * Clear the INTx Level status, otherwise future interrupts are
	 * not sent.
	 */
	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;

	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
}

/*
 * Create kernel irqs and set up the handlers for the legacy interrupts.
 * Also some minimum initialization for the MSI support.
 */
static int tile_init_irqs(struct pci_controller *controller)
{
	int i;
	int j;
	int irq;
	int result;

	cpumask_copy(&intr_cpus_map, cpu_online_mask);

	for (i = 0; i < 4; i++) {
		gxio_trio_context_t *context = controller->trio;
		int cpu;

		/* Ask the kernel to allocate an IRQ. */
		irq = irq_alloc_hwirq(-1);
		if (!irq) {
			pr_err("PCI: no free irq vectors, failed for %d\n", i);
			goto free_irqs;
		}
		controller->irq_intx_table[i] = irq;

		/* Distribute the 4 IRQs to different tiles. */
		cpu = tile_irq_cpu(irq);

		/* Configure the TRIO intr binding for this IRQ. */
		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
						      cpu_y(cpu), KERNEL_PL,
						      irq, controller->mac, i);
		if (result < 0) {
			pr_err("PCI: MAC intx config failed for %d\n", i);
			goto free_irqs;
		}

		/* Register the IRQ handler with the kernel. */
		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
					 trio_handle_level_irq);
		irq_set_chip_data(irq, (void *)(uint64_t)i);
		irq_set_handler_data(irq, controller);
	}

	return 0;

free_irqs:
	for (j = 0; j < i; j++)
		irq_free_hwirq(controller->irq_intx_table[j]);

	return -1;
}

/*
 * Return 1 if the port is strapped to operate in RC mode.
 */
static int strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
{
	TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
	unsigned int reg_offset;

	/* Check the port configuration. */
	reg_offset =
		(TRIO_PCIE_INTFC_PORT_CONFIG <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
	port_config.word =
		__gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);

	if (port_config.strap_state == AUTO_CONFIG_RC ||
	    port_config.strap_state == AUTO_CONFIG_RC_G1)
		return 1;
	else
		return 0;
}
/*
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Return the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
	int ctl_index = 0;
	int i, j;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	if (num_trio_shims == 0 || sim_is_simulator())
		return 0;

	/*
	 * Now determine which PCIe ports are configured to operate in RC
	 * mode. There is a difference in the port configuration capability
	 * between the Gx36 and Gx72 devices.
	 *
	 * The Gx36 has configuration capability for each of the 3 PCIe
	 * interfaces (disable, auto endpoint, auto RC, etc.).
	 * On the Gx72, you can only select one of the 3 PCIe interfaces per
	 * TRIO to train automatically. Further, the allowable training modes
	 * are reduced to four options (auto endpoint, auto RC, stream x1,
	 * stream x4).
	 *
	 * For Gx36 ports, it must be allowed to be in RC mode by the
	 * Board Information Block, and the hardware strapping pins must be
	 * set to RC mode.
	 *
	 * For Gx72 ports, the port will operate in RC mode if either of the
	 * following is true:
	 * 1. It is allowed to be in RC mode by the Board Information Block,
	 *    and the BIB doesn't allow the EP mode.
	 * 2. It is allowed to be in either the RC or the EP mode by the BIB,
	 *    and the hardware strapping pin is set to RC mode.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];

		if (context->fd < 0)
			continue;

		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			int is_rc = 0;

			if (pcie_ports[i].is_gx72 &&
			    pcie_ports[i].ports[j].allow_rc) {
				if (!pcie_ports[i].ports[j].allow_ep ||
				    strapped_for_rc(context, j))
					is_rc = 1;
			} else if (pcie_ports[i].ports[j].allow_rc &&
				   strapped_for_rc(context, j)) {
				is_rc = 1;
			}
			if (is_rc) {
				pcie_rc[i][j] = 1;
				num_rc_controllers++;
			}
		}
	}

	/* Return if no PCIe ports are configured to operate in RC mode. */
	if (num_rc_controllers == 0)
		return 0;

	/* Set the TRIO pointer and MAC index for each PCIe RC port. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_rc[i][j]) {
				pci_controllers[ctl_index].trio =
					&trio_contexts[i];
				pci_controllers[ctl_index].mac = j;
				pci_controllers[ctl_index].trio_index = i;
				ctl_index++;
				if (ctl_index == num_rc_controllers)
					goto out;
			}
		}
	}

out:
	/* Configure each PCIe RC port. */
	for (i = 0; i < num_rc_controllers; i++) {
		/* Configure the PCIe MAC to run in RC mode. */
		struct pci_controller *controller = &pci_controllers[i];

		controller->index = i;
		controller->ops = &tile_cfg_ops;

		controller->io_space.start = PCIBIOS_MIN_IO +
			(i * IO_SPACE_SIZE);
		controller->io_space.end = controller->io_space.start +
			IO_SPACE_SIZE - 1;
		BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
		controller->io_space.flags = IORESOURCE_IO;
		snprintf(controller->io_space_name,
			 sizeof(controller->io_space_name),
			 "PCI I/O domain %d", i);
		controller->io_space.name = controller->io_space_name;

		/*
		 * The PCI memory resource is located above the PA space.
		 * For every host bridge, the BAR window or the MMIO aperture
		 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
		 * PA space.
		 */
		controller->mem_offset = TILE_PCI_MEM_START +
			(i * TILE_PCI_BAR_WINDOW_TOP);
		controller->mem_space.start = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
		controller->mem_space.end = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - 1;
		controller->mem_space.flags = IORESOURCE_MEM;
		snprintf(controller->mem_space_name,
			 sizeof(controller->mem_space_name),
			 "PCI mem domain %d", i);
		controller->mem_space.name = controller->mem_space_name;
	}

	return num_rc_controllers;
}
/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;

	return controller->irq_intx_table[pin - 1];
}
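/*
 * For instance, INTA (pin == 1) resolves to irq_intx_table[0] and INTD
 * (pin == 4) to irq_intx_table[3], i.e. the four kernel IRQs set up in
 * tile_init_irqs() above.
 */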
static void fixup_read_and_payload_sizes(struct pci_controller *controller)
{
	gxio_trio_context_t *trio_context = controller->trio;
	struct pci_bus *root_bus = controller->root_bus;
	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
	unsigned int reg_offset;
	struct pci_bus *child;
	int mac;
	int err;

	mac = controller->mac;

	/* Set our max read request size to be 4KB. */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);
	dev_control.max_read_req_sz = 5;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    dev_control.word);

	/*
	 * Set the max payload size supported by this Gx PCIe MAC.
	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
	 * experiments have shown that setting MPS to 256 yields the
	 * best performance.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CAP <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					     reg_offset);
	rc_dev_cap.mps_sup = 1;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    rc_dev_cap.word);

	/* Configure PCI Express MPS setting. */
	list_for_each_entry(child, &root_bus->children, node)
		pcie_bus_configure_settings(child);

	/*
	 * Set the mac_config register in trio based on the MPS/MRS of the link.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);

	err = gxio_trio_set_mps_mrs(trio_context,
				    dev_control.max_payload_size,
				    dev_control.max_read_req_sz,
				    mac);
	if (err < 0) {
		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
		       mac, controller->trio_index);
	}
}
static int setup_pcie_rc_delay(char *str)
{
	unsigned long delay = 0;
	unsigned long trio_index;
	unsigned long mac;

	if (str == NULL || !isdigit(*str))
		return -EINVAL;
	trio_index = simple_strtoul(str, (char **)&str, 10);
	if (trio_index >= TILEGX_NUM_TRIO)
		return -EINVAL;

	if (*str != ',')
		return -EINVAL;

	str++;
	if (!isdigit(*str))
		return -EINVAL;
	mac = simple_strtoul(str, (char **)&str, 10);
	if (mac >= TILEGX_TRIO_PCIES)
		return -EINVAL;

	if (*str != '\0') {
		if (*str != ',')
			return -EINVAL;

		str++;
		if (!isdigit(*str))
			return -EINVAL;
		delay = simple_strtoul(str, (char **)&str, 10);
	}

	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
	return 0;
}
early_param("pcie_rc_delay", setup_pcie_rc_delay);
/* PCI initialization entry point, called by subsys_initcall. */
int __init pcibios_init(void)
{
	resource_size_t offset;
	LIST_HEAD(resources);
	int next_busno;
	struct pci_host_bridge *bridge;
	int i;

	tile_pci_init();

	if (num_rc_controllers == 0)
		return 0;

	/*
	 * Delay a bit in case devices aren't ready. Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	msleep(250);

	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
		struct pci_bus *bus;
		unsigned int reg_offset;
		unsigned int class_code_revision;
		int trio_index;
		int mac;
		int ret;

		if (trio_context->fd < 0)
			continue;

		trio_index = controller->trio_index;
		mac = controller->mac;

		/*
		 * Check for PCIe link-up status to decide if we need
		 * to force the link to come up.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_STATUS <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (rc_delay[trio_index][mac]) {
				pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
					rc_delay[trio_index][mac], mac,
					trio_index);
				msleep(rc_delay[trio_index][mac] * 1000);
			}
			ret = gxio_trio_force_rc_link_up(trio_context, mac);
			if (ret < 0)
				pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
				       mac, trio_index);
		}

		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
			i, trio_index, controller->mac);

		/* Delay the bus probe if needed. */
		if (rc_delay[trio_index][mac]) {
			pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
				rc_delay[trio_index][mac], mac, trio_index);
			msleep(rc_delay[trio_index][mac] * 1000);
		} else {
			/*
			 * Wait a bit here because some EP devices
			 * take longer to come up.
			 */
			msleep(1000);
		}

		/* Check for PCIe link-up status again. */
		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (pcie_ports[trio_index].ports[mac].removable) {
				pr_info("PCI: link is down, MAC %d on TRIO %d\n",
					mac, trio_index);
				pr_info("This is expected if no PCIe card is connected to this link\n");
			} else
				pr_err("PCI: link is down, MAC %d on TRIO %d\n",
				       mac, trio_index);
			continue;
		}

		/*
		 * Ensure that the link can come out of L1 power down state.
		 * Strictly speaking, this is needed only in the case of
		 * heavy RC-initiated DMAs.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		tx_fifo_ctl.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		tx_fifo_ctl.min_p_credits = 0;
		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
				  tx_fifo_ctl.word);

		/*
		 * Change the device ID so that Linux bus crawl doesn't confuse
		 * the internal bridge with any Tilera endpoints.
		 */
		reg_offset =
			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
				    (TILERA_GX36_RC_DEV_ID <<
				    TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
				    TILERA_VENDOR_ID);

		/* Set the internal P2P bridge class code. */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);

#ifdef USE_SHARED_PCIE_CONFIG_REGION

		/* Map in the MMIO space for the PIO region. */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#else

		/* Alloc a PIO region for PCI config access per MAC. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);
			continue;
		}
		trio_context->pio_cfg_index[mac] = ret;

		/* For PIO CFG, the bus_address_hi parameter is 0. */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    trio_context->pio_cfg_index[mac],
						    mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);
			continue;
		}
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#endif

		/*
		 * To save VMALLOC space, we take advantage of the fact that
		 * bit 29 in the PIO CFG address format is reserved 0. With
		 * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
		 * this cuts VMALLOC space usage from 1GB to 512MB per mac.
		 */
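		/*
		 * Illustrative arithmetic: a full per-MAC window would be
		 * 1UL << 30 == 1GB; with bit 29 known to be 0, mapping
		 * 1UL << 29 == 512MB below suffices.
		 */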
		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset, (1UL <<
			(TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
			       mac, trio_index);
			continue;
		}

		/* Initialize the PCIe interrupts. */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
			       mac, trio_index);
			continue;
		}

		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);
		pci_add_resource(&resources, &controller->io_space);
		controller->first_busno = next_busno;

		bridge = pci_alloc_host_bridge(0);
		if (!bridge)
			break;

		list_splice_init(&resources, &bridge->windows);
		bridge->dev.parent = NULL;
		bridge->sysdata = controller;
		bridge->busnr = next_busno;
		bridge->ops = controller->ops;
		bridge->swizzle_irq = pci_common_swizzle;
		bridge->map_irq = tile_map_irq;

		pci_scan_root_bus_bridge(bridge);
		bus = bridge->bus;
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();

	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);

		/* Alloc a PIO region for PCI memory access for each RC port. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}
		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac,
						    0,
						    0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}

#ifdef CONFIG_TILE_PCI_IO
		/*
		 * Alloc a PIO region for PCI I/O space access for each RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}
		controller->pio_io_index = ret;

		/*
		 * For PIO IO, the bus_address_hi parameter is hard-coded 0
		 * because PCI I/O address space is 32-bit.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_io_index,
						    controller->mac,
						    0,
						    HV_TRIO_PIO_FLAG_IO_SPACE);
		if (ret < 0) {
			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);
			continue;
		}
#endif

		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);
				goto alloc_mem_map_failed;
			}
			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
								controller->mem_maps[j],
								start_pfn << PAGE_SHIFT,
								nr_pages << PAGE_SHIFT,
								trio_context->asid,
								controller->mac,
								(start_pfn << PAGE_SHIFT) +
								TILE_PCI_MEM_MAP_BASE_OFFSET,
								j,
								GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);
				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}

		pci_bus_add_devices(root_bus);
	}

	return 0;
}
subsys_initcall(pcibios_init);
/* Process any "pci=" kernel boot arguments. */
char *__init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

/*
 * Called for each device after PCI setup is done.
 * We initialize the PCI device capabilities conservatively, assuming that
 * all devices can only address the 32-bit DMA space. The exception here is
 * that the device dma_offset is set to the value that matches the 64-bit
 * capable devices. This is OK because dma_offset is not used by legacy
 * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
 * This implementation matches the kernel design of setting PCI devices'
 * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
 * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
 */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
	pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * By searching phys_addr in each controller's mem_space, we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].mem_space.start;
		bar_end = pci_controllers[i].mem_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) +
		(start & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);

#ifdef CONFIG_TILE_PCI_IO
/* Map a PCI I/O address into VA space. */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = port;
	end = port + size - 1;

	/*
	 * By searching the port in each controller's io_space, we can
	 * determine the controller that should accept the PCI I/O access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].io_space.start;
		bar_end = pci_controllers[i].io_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	port -= controller->io_space.start;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioport_map);

void ioport_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
#endif

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/
/*
 * These are the normal read and write ops; they are expanded with
 * macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
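/*
 * Illustrative call path: pci_bus_read_config_word(bus, devfn,
 * PCI_VENDOR_ID, &val) lands here with size == 2 and offset == 0; the
 * access is issued as a type-0 config read for the directly attached
 * device, or as a type-1 config read for devices further downstream.
 */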
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;

	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;

	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:

	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;

	case 2:
		*val = 0xFFFF;
		break;

	case 1:
		*val = 0xFF;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}

/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;

	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;

	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:

	return 0;
}
static struct pci_ops tile_cfg_ops = {
	.read =		tile_cfg_read,
	.write =	tile_cfg_write,
};

/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);

	return 0;
}

static void tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	pci_msi_unmask_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name = "tilegx_msi",
	.irq_startup = tilegx_msi_startup,
	.irq_ack = tilegx_msi_ack,
	.irq_mask = tilegx_msi_mask,
	.irq_unmask = tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = irq_alloc_hwirq(-1);
	if (!irq)
		return -ENOSPC;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message address.
	 * These devices will fall back to using the legacy interrupts.
	 * Most PCIe endpoint devices do support 64-bit message addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}

	default_irq = desc->msi_attrib.default_irq;
	controller = irq_get_handler_data(default_irq);

	BUG_ON(!controller);

	trio_context = controller->trio;

	/*
	 * Allocate a scatter-queue that will accept the MSI write and
	 * trigger the TILE-side interrupts. We use the scatter-queue regions
	 * before the mem map regions, because the latter are needed by more
	 * applications.
	 */
	mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
	if (mem_map >= 0) {
		TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
			.pop = 0,
			.doorbell = 1,
		}};

		mem_map += TRIO_NUM_MAP_MEM_REGIONS;
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
		msg.data = (unsigned int)doorbell_template.word;
	} else {
		/* SQ regions are out, allocate from map mem regions. */
		mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
		if (mem_map < 0) {
			dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
				 desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
			ret = -ENOMEM;
			goto msi_mem_map_alloc_failure;
		}

		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
			TRIO_MAP_MEM_REG_INT0;
		msg.data = mem_map;
	}
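
	/*
	 * In either case above, msi_addr points at a trigger address
	 * inside the chosen region: the doorbell word in the last 8 bytes
	 * of a scatter-queue region, or the INT3 trigger offset of a
	 * map-mem region. The device's MSI write to that address is what
	 * raises the TILE-side interrupt.
	 */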
	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the MSI interrupt and
	 * set up the IPI binding.
	 */
	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_info(&pdev->dev, "HV MSI config failed\n");

		goto hv_msi_config_failure;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_hi = msi_addr >> 32;
	msg.address_lo = msi_addr & 0xffffffff;

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
	irq_set_handler_data(irq, controller);

	return 0;

hv_msi_config_failure:
	/* Free mem-map */
msi_mem_map_alloc_failure:
is_64_failure:
	irq_free_hwirq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	irq_free_hwirq(irq);
}