dmanv40.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/cl006b.h>
#include <nvif/unpack.h>

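/* Map an engine to the PFIFO context register and RAMFC offset that hold
 * its per-channel context pointer.  Returns false for engines without a
 * context slot here (software, DMA objects, and MPEG before chipset 0x44).
 */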
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
        switch (engine->subdev.index) {
        case NVKM_ENGINE_DMAOBJ:
        case NVKM_ENGINE_SW:
                return false;
        case NVKM_ENGINE_GR:
                *reg = 0x0032e0;
                *ctx = 0x38;
                return true;
        case NVKM_ENGINE_MPEG:
                if (engine->subdev.device->chipset < 0x44)
                        return false;
                *reg = 0x00330c;
                *ctx = 0x54;
                return true;
        default:
                WARN_ON(1);
                return false;
        }
}

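/* Drop this channel's context for an engine: with bit 0 of 0x002500
 * cleared for the duration, zero the engine's context register if this
 * channel is the one currently active in PFIFO, and clear the channel's
 * RAMFC context entry.
 */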
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
                          struct nvkm_engine *engine, bool suspend)
{
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nv04_fifo *fifo = chan->fifo;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_instmem *imem = device->imem;
        unsigned long flags;
        u32 reg, ctx;
        int chid;

        if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
                return 0;

        spin_lock_irqsave(&fifo->base.lock, flags);
        nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

        chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
        if (chid == chan->base.chid)
                nvkm_wr32(device, reg, 0x00000000);
        nvkm_kmap(imem->ramfc);
        nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
        nvkm_done(imem->ramfc);

        nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&fifo->base.lock, flags);
        return 0;
}

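/* Counterpart to the fini path above: load the engine context's instance
 * address into the context register (if the channel is currently active)
 * and into the channel's RAMFC context entry.
 */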
static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
                          struct nvkm_engine *engine)
{
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nv04_fifo *fifo = chan->fifo;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_instmem *imem = device->imem;
        unsigned long flags;
        u32 inst, reg, ctx;
        int chid;

        if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
                return 0;
        inst = chan->engn[engine->subdev.index]->addr >> 4;

        spin_lock_irqsave(&fifo->base.lock, flags);
        nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

        chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
        if (chid == chan->base.chid)
                nvkm_wr32(device, reg, inst);
        nvkm_kmap(imem->ramfc);
        nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
        nvkm_done(imem->ramfc);

        nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&fifo->base.lock, flags);
        return 0;
}

static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
                          struct nvkm_engine *engine)
{
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
}

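/* Bind the object backing an engine's channel context and keep the
 * resulting gpuobj in chan->engn[]; engines without a context slot
 * need nothing bound.
 */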
static int
nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
                          struct nvkm_engine *engine,
                          struct nvkm_object *object)
{
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        const int engn = engine->subdev.index;
        u32 reg, ctx;

        if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
                return 0;

        return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
}

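/* Insert an object into RAMHT so PFIFO can resolve its handle; the
 * context word encodes the channel id and the owning engine.
 */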
static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
                          struct nvkm_object *object)
{
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
        u32 context = chan->base.chid << 23;
        u32 handle  = object->handle;
        int hash;

        switch (object->engine->subdev.index) {
        case NVKM_ENGINE_DMAOBJ:
        case NVKM_ENGINE_SW    : context |= 0x00000000; break;
        case NVKM_ENGINE_GR    : context |= 0x00100000; break;
        case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        mutex_lock(&chan->fifo->base.engine.subdev.mutex);
        hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
                                 handle, context);
        mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
        return hash;
}

static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
        .dtor = nv04_fifo_dma_dtor,
        .init = nv04_fifo_dma_init,
        .fini = nv04_fifo_dma_fini,
        .engine_ctor = nv40_fifo_dma_engine_ctor,
        .engine_dtor = nv40_fifo_dma_engine_dtor,
        .engine_init = nv40_fifo_dma_engine_init,
        .engine_fini = nv40_fifo_dma_engine_fini,
        .object_ctor = nv40_fifo_dma_object_ctor,
        .object_dtor = nv04_fifo_dma_object_dtor,
};

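/* Channel constructor for NV40_CHANNEL_DMA: unpack the nv03 DMA channel
 * args, allocate the channel, and fill in its 128-byte RAMFC entry
 * (initial put/get offsets, pushbuf instance, DMA fetch settings).
 */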
static int
nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
                  void *data, u32 size, struct nvkm_object **pobject)
{
        struct nvkm_object *parent = oclass->parent;
        union {
                struct nv03_channel_dma_v0 v0;
        } *args = data;
        struct nv04_fifo *fifo = nv04_fifo(base);
        struct nv04_fifo_chan *chan = NULL;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_instmem *imem = device->imem;
        int ret = -ENOSYS;

        nvif_ioctl(parent, "create channel dma size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
                                   "offset %08x\n", args->v0.version,
                           args->v0.pushbuf, args->v0.offset);
                if (!args->v0.pushbuf)
                        return -EINVAL;
        } else
                return ret;

        if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
                return -ENOMEM;
        *pobject = &chan->base.object;

        ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
                                  0x1000, 0x1000, false, 0, args->v0.pushbuf,
                                  (1ULL << NVKM_ENGINE_DMAOBJ) |
                                  (1ULL << NVKM_ENGINE_GR) |
                                  (1ULL << NVKM_ENGINE_MPEG) |
                                  (1ULL << NVKM_ENGINE_SW),
                                  0, 0xc00000, 0x1000, oclass, &chan->base);
        chan->fifo = fifo;
        if (ret)
                return ret;

        args->v0.chid = chan->base.chid;
        chan->ramfc = chan->base.chid * 128;

        nvkm_kmap(imem->ramfc);
        nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
        nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
        nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
        nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
                               NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                               NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
                               NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                               NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
        nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
        nvkm_done(imem->ramfc);
        return 0;
}

const struct nvkm_fifo_chan_oclass
nv40_fifo_dma_oclass = {
        .base.oclass = NV40_CHANNEL_DMA,
        .base.minver = 0,
        .base.maxver = 0,
        .ctor = nv40_fifo_dma_new,
};