nv50.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>

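/* Subdev state: "lock" serialises indirect VRAM access through the PRAMIN
 * window, and "addr" caches the window base currently programmed so that
 * consecutive accesses to the same 1MiB region skip the reprogram.
 */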
struct nv50_instmem {
	struct nvkm_instmem base;
	unsigned long lock_flags;
	spinlock_t lock;
	u64 addr;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)

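/* Per-object state: "mem" is the backing VRAM allocation, while "bar" and
 * "map" describe an optional direct mapping of that VRAM through BAR3
 * (PRAMIN), set up lazily by nv50_instobj_boot().
 */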
struct nv50_instobj {
	struct nvkm_memory memory;
	struct nv50_instmem *imem;
	struct nvkm_mem *mem;
	struct nvkm_vma bar;
	void *map;
};

static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_VRAM;
}

static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
	return nv50_instobj(memory)->mem->offset;
}

static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
}

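/* Lazily map the object's VRAM through BAR3: carve out space in the BAR's
 * virtual address space, point it at the backing memory, and ioremap the
 * corresponding PCI window so the object can be accessed directly via
 * iobj->map instead of the slower 0x1700 sliding window.
 */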
static void
nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
	struct nvkm_device *device = subdev->device;
	u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int ret;

	iobj->map = ERR_PTR(-ENOMEM);

	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
	if (ret == 0) {
		map = ioremap(device->func->resource_addr(device, 3) +
			      (u32)iobj->bar.offset, size);
		if (map) {
			nvkm_memory_map(memory, &iobj->bar, 0);
			iobj->map = map;
		} else {
			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
			nvkm_vm_put(&iobj->bar);
		}
	} else {
		nvkm_warn(subdev, "PRAMIN exhausted\n");
	}
}

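/* acquire() hands back a direct BAR3 mapping when one is available (booting
 * it on first use); otherwise it returns NULL and takes imem->lock, which
 * serialises the indirect rd32/wr32 window accesses until release().
 */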
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
}

static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_bar *bar = imem->base.subdev.device->bar;
	struct nvkm_vm *vm;
	unsigned long flags;

	if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
		nvkm_memory_boot(memory, vm);
	if (!IS_ERR_OR_NULL(iobj->map))
		return iobj->map;

	spin_lock_irqsave(&imem->lock, flags);
	imem->lock_flags = flags;
	return NULL;
}

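/* Indirect access: 0x001700 selects which 1MiB-aligned chunk of VRAM is
 * visible through the PRAMIN aperture at BAR0 + 0x700000.  The cached
 * imem->addr avoids rewriting the window register when consecutive accesses
 * fall within the currently selected chunk.
 */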
static u32
nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
	u32 data;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}

	data = nvkm_rd32(device, 0x700000 + addr);
	return data;
}

static void
nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nv50_instmem *imem = iobj->imem;
	struct nvkm_device *device = imem->base.subdev.device;
	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;

	if (unlikely(imem->addr != base)) {
		nvkm_wr32(device, 0x001700, base >> 16);
		imem->addr = base;
	}

	nvkm_wr32(device, 0x700000 + addr, data);
}

static void
nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	nvkm_vm_map_at(vma, offset, iobj->mem);
}

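/* Undo boot(): drop the BAR3 mapping if one was created, then hand the
 * backing VRAM back to the FB allocator.
 */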
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
	struct nv50_instobj *iobj = nv50_instobj(memory);
	struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;

	if (!IS_ERR_OR_NULL(iobj->map)) {
		nvkm_vm_put(&iobj->bar);
		iounmap(iobj->map);
	}

	ram->func->put(ram, &iobj->mem);
	return iobj;
}

static const struct nvkm_memory_func
nv50_instobj_func = {
	.dtor = nv50_instobj_dtor,
	.target = nv50_instobj_target,
	.size = nv50_instobj_size,
	.addr = nv50_instobj_addr,
	.boot = nv50_instobj_boot,
	.acquire = nv50_instobj_acquire,
	.release = nv50_instobj_release,
	.rd32 = nv50_instobj_rd32,
	.wr32 = nv50_instobj_wr32,
	.map = nv50_instobj_map,
};

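/* Constructor: allocate backing VRAM for a new instance object, rounding
 * size and alignment up to the 4KiB pages instmem works in.
 */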
static int
nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nv50_instmem *imem = nv50_instmem(base);
	struct nv50_instobj *iobj;
	struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
	int ret;

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->memory;

	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
	iobj->imem = imem;

	size  = max((size  + 4095) & ~4095, (u32)4096);
	align = max((align + 4095) & ~4095, (u32)4096);

	ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
	if (ret)
		return ret;

	iobj->mem->page_shift = 12;
	return 0;
}

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/
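/* Forget the cached window base so the next indirect access reprograms
 * 0x001700.
 */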
static void
nv50_instmem_fini(struct nvkm_instmem *base)
{
	nv50_instmem(base)->addr = ~0ULL;
}

static const struct nvkm_instmem_func
nv50_instmem = {
	.fini = nv50_instmem_fini,
	.memory_new = nv50_instobj_new,
	.persistent = false,
	.zero = false,
};

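/* Subdev constructor: allocate the nv50_instmem state and initialise the
 * lock guarding the PRAMIN window.
 */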
int
nv50_instmem_new(struct nvkm_device *device, int index,
		 struct nvkm_instmem **pimem)
{
	struct nv50_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
	spin_lock_init(&imem->lock);
	*pimem = &imem->base;
	return 0;
}