/* cavium-thunderx.c */
/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>

#include "cavium.h"
  18. static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
  19. {
  20. down(&host->mmc_serializer);
  21. }
  22. static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
  23. {
  24. up(&host->mmc_serializer);
  25. }
  26. static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
  27. {
  28. writeq(val, host->base + MIO_EMM_INT(host));
  29. writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
  30. }
  31. static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
  32. struct pci_dev *pdev)
  33. {
  34. int nvec, ret, i;
  35. nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
  36. if (nvec < 0)
  37. return nvec;
  38. /* register interrupts */
  39. for (i = 0; i < nvec; i++) {
  40. ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
  41. cvm_mmc_interrupt,
  42. 0, cvm_mmc_irq_names[i], host);
  43. if (ret)
  44. return ret;
  45. }
  46. return 0;
  47. }
  48. static int thunder_mmc_probe(struct pci_dev *pdev,
  49. const struct pci_device_id *id)
  50. {
  51. struct device_node *node = pdev->dev.of_node;
  52. struct device *dev = &pdev->dev;
  53. struct device_node *child_node;
  54. struct cvm_mmc_host *host;
  55. int ret, i = 0;
  56. host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
  57. if (!host)
  58. return -ENOMEM;
  59. pci_set_drvdata(pdev, host);
  60. ret = pcim_enable_device(pdev);
  61. if (ret)
  62. return ret;
  63. ret = pci_request_regions(pdev, KBUILD_MODNAME);
  64. if (ret)
  65. return ret;
  66. host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
  67. if (!host->base)
  68. return -EINVAL;
  69. /* On ThunderX these are identical */
  70. host->dma_base = host->base;
  71. host->reg_off = 0x2000;
  72. host->reg_off_dma = 0x160;
  73. host->clk = devm_clk_get(dev, NULL);
  74. if (IS_ERR(host->clk))
  75. return PTR_ERR(host->clk);
  76. ret = clk_prepare_enable(host->clk);
  77. if (ret)
  78. return ret;
  79. host->sys_freq = clk_get_rate(host->clk);
  80. spin_lock_init(&host->irq_handler_lock);
  81. sema_init(&host->mmc_serializer, 1);
  82. host->dev = dev;
  83. host->acquire_bus = thunder_mmc_acquire_bus;
  84. host->release_bus = thunder_mmc_release_bus;
  85. host->int_enable = thunder_mmc_int_enable;
  86. host->use_sg = true;
  87. host->big_dma_addr = true;
  88. host->need_irq_handler_lock = true;
  89. host->last_slot = -1;
  90. ret = dma_set_mask(dev, DMA_BIT_MASK(48));
  91. if (ret)
  92. goto error;
  93. /*
  94. * Clear out any pending interrupts that may be left over from
  95. * bootloader. Writing 1 to the bits clears them.
  96. */
  97. writeq(127, host->base + MIO_EMM_INT_EN(host));
  98. writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
  99. /* Clear DMA FIFO */
  100. writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
  101. ret = thunder_mmc_register_interrupts(host, pdev);
  102. if (ret)
  103. goto error;
  104. for_each_child_of_node(node, child_node) {
  105. /*
  106. * mmc_of_parse and devm* require one device per slot.
  107. * Create a dummy device per slot and set the node pointer to
  108. * the slot. The easiest way to get this is using
  109. * of_platform_device_create.
  110. */
  111. if (of_device_is_compatible(child_node, "mmc-slot")) {
  112. host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
  113. &pdev->dev);
  114. if (!host->slot_pdev[i])
  115. continue;
  116. ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
  117. if (ret)
  118. goto error;
  119. }
  120. i++;
  121. }
  122. dev_info(dev, "probed\n");
  123. return 0;
  124. error:
  125. for (i = 0; i < CAVIUM_MAX_MMC; i++) {
  126. if (host->slot[i])
  127. cvm_mmc_of_slot_remove(host->slot[i]);
  128. if (host->slot_pdev[i])
  129. of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
  130. }
  131. clk_disable_unprepare(host->clk);
  132. return ret;
  133. }
  134. static void thunder_mmc_remove(struct pci_dev *pdev)
  135. {
  136. struct cvm_mmc_host *host = pci_get_drvdata(pdev);
  137. u64 dma_cfg;
  138. int i;
  139. for (i = 0; i < CAVIUM_MAX_MMC; i++)
  140. if (host->slot[i])
  141. cvm_mmc_of_slot_remove(host->slot[i]);
  142. dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
  143. dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
  144. writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
  145. clk_disable_unprepare(host->clk);
  146. }
  147. static const struct pci_device_id thunder_mmc_id_table[] = {
  148. { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
  149. { 0, } /* end of table */
  150. };
  151. static struct pci_driver thunder_mmc_driver = {
  152. .name = KBUILD_MODNAME,
  153. .id_table = thunder_mmc_id_table,
  154. .probe = thunder_mmc_probe,
  155. .remove = thunder_mmc_remove,
  156. };
  157. module_pci_driver(thunder_mmc_driver);
  158. MODULE_AUTHOR("Cavium Inc.");
  159. MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
  160. MODULE_LICENSE("GPL");
  161. MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);