/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
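
/*
 * Architecture-wide DMA mapping operations. This pointer is assigned
 * elsewhere during platform DMA setup; the generic dma-mapping code
 * reaches it through the SH get_dma_ops() helper.
 */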
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
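
/*
 * Allocate a coherent buffer: take zeroed pages from the page
 * allocator, flush any cache lines covering them, and return an
 * uncached mapping created with ioremap_nocache(). The compound
 * allocation is split into individual pages so that the free path
 * below can release them one at a time.
 */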
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp,
                                 unsigned long attrs)
{
        void *ret, *ret_nocache;
        int order = get_order(size);

        gfp |= __GFP_ZERO;

        ret = (void *)__get_free_pages(gfp, order);
        if (!ret)
                return NULL;

        /*
         * Pages from the page allocator may have data present in
         * cache. So flush the cache before using uncached memory.
         */
        sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);

        ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
        if (!ret_nocache) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }

        split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

        *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);

        return ret_nocache;
}
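
/*
 * Free a buffer from dma_generic_alloc_coherent(): hand the individual
 * pages (split at allocation time) back to the page allocator and tear
 * down the uncached mapping.
 */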
void dma_generic_free_coherent(struct device *dev, size_t size,
                               void *vaddr, dma_addr_t dma_handle,
                               unsigned long attrs)
{
        int order = get_order(size);
        unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset;
        int k;

        for (k = 0; k < (1 << order); k++)
                __free_pages(pfn_to_page(pfn + k), 0);

        iounmap(vaddr);
}
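
/*
 * Cache maintenance before the device touches the buffer: invalidate
 * for device-to-memory transfers, write back for memory-to-device
 * transfers, and write back + invalidate (purge) for bidirectional
 * ones. In 29-bit mode the address is first converted to its cached
 * alias, since the flush primitives operate on cached addresses.
 */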
void sh_sync_dma_for_device(void *vaddr, size_t size,
                            enum dma_data_direction direction)
{
        void *addr;

        addr = __in_29bit_mode() ?
               (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                __flush_invalidate_region(addr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                __flush_wback_region(addr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                __flush_purge_region(addr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(sh_sync_dma_for_device);
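
/*
 * Stub __setup() handler so that options beginning with "memchunk."
 * are accepted on the kernel command line; the real parsing is done
 * later by memchunk_cmdline_override(), which rescans
 * boot_command_line itself.
 */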
static int __init memchunk_setup(char *str)
{
        return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);
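
/*
 * Look for a "memchunk.<name>=<size>" option on the kernel command
 * line and, when the name matches, override the platform's default
 * chunk size. The size goes through memparse(), so "k"/"m" suffixes
 * are accepted, e.g. "memchunk.<name>=16m".
 */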
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
        char *p = boot_command_line;
        int k = strlen(name);

        while ((p = strstr(p, "memchunk."))) {
                p += 9; /* strlen("memchunk.") */
                if (!strncmp(name, p, k) && p[k] == '=') {
                        p += k + 1;
                        *sizep = memparse(p, NULL);
                        pr_info("%s: forcing memory chunk size to 0x%08lx\n",
                                name, *sizep);
                        break;
                }
        }
}
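
/*
 * Back a platform device's last resource slot (which must be empty)
 * with a coherent memory chunk. Returns 0 on success or when the
 * possibly command-line-overridden size is zero, -EINVAL if the slot
 * is already in use, and -ENOMEM if the allocation fails.
 */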
int __init platform_resource_setup_memory(struct platform_device *pdev,
                                          char *name, unsigned long memsize)
{
        struct resource *r;
        dma_addr_t dma_handle;
        void *buf;

        r = pdev->resource + pdev->num_resources - 1;
        if (r->flags) {
                pr_warning("%s: unable to find empty space for resource\n",
                           name);
                return -EINVAL;
        }

        memchunk_cmdline_override(name, &memsize);
        if (!memsize)
                return 0;

        buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
        if (!buf) {
                pr_warning("%s: unable to allocate memory\n", name);
                return -ENOMEM;
        }

        memset(buf, 0, memsize);

        r->flags = IORESOURCE_MEM;
        r->start = dma_handle;
        r->end = r->start + memsize - 1;
        r->name = name;
        return 0;
}
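
/*
 * Usage sketch (illustrative only, not part of this file; all names
 * below are hypothetical). A board setup file reserves a chunk by
 * leaving the last resource of its platform device empty:
 *
 *      static struct resource foo_resources[] = {
 *              [0] = { ... },          // MMIO window, IRQs, etc.
 *              [1] = {},               // left empty for the memory chunk
 *      };
 *
 *      static struct platform_device foo_device = {
 *              .name           = "foo",
 *              .resource       = foo_resources,
 *              .num_resources  = ARRAY_SIZE(foo_resources),
 *      };
 *
 *      static int __init foo_setup(void)
 *      {
 *              // Default to 4 MiB; can be overridden at boot with
 *              // "memchunk.foo=8m" or suppressed with "memchunk.foo=0".
 *              return platform_resource_setup_memory(&foo_device, "foo",
 *                                                    4 << 20);
 *      }
 */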