dmanv04.c 7.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224
  1. /*
  2. * Copyright 2012 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Ben Skeggs
  23. */
  24. #include "channv04.h"
  25. #include "regsnv04.h"
  26. #include <core/client.h>
  27. #include <core/ramht.h>
  28. #include <subdev/instmem.h>
  29. #include <nvif/class.h>
  30. #include <nvif/cl006b.h>
  31. #include <nvif/unpack.h>
  32. void
  33. nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
  34. {
  35. struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
  36. struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
  37. mutex_lock(&chan->fifo->base.engine.subdev.mutex);
  38. nvkm_ramht_remove(imem->ramht, cookie);
  39. mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
  40. }
  41. static int
  42. nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
  43. struct nvkm_object *object)
  44. {
  45. struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
  46. struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
  47. u32 context = 0x80000000 | chan->base.chid << 24;
  48. u32 handle = object->handle;
  49. int hash;
  50. switch (object->engine->subdev.index) {
  51. case NVKM_ENGINE_DMAOBJ:
  52. case NVKM_ENGINE_SW : context |= 0x00000000; break;
  53. case NVKM_ENGINE_GR : context |= 0x00010000; break;
  54. case NVKM_ENGINE_MPEG : context |= 0x00020000; break;
  55. default:
  56. WARN_ON(1);
  57. return -EINVAL;
  58. }
  59. mutex_lock(&chan->fifo->base.engine.subdev.mutex);
  60. hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
  61. handle, context);
  62. mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
  63. return hash;
  64. }
/* Quiesce a channel: if it currently owns CACHE1, save its register
 * state back into its RAMFC context image and load a null context in
 * its place, then take the channel out of DMA mode.
 *
 * NOTE(review): the register write sequence below is order-sensitive
 * hardware programming; do not reorder.
 */
void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	const struct nv04_fifo_ramfc *c;
	unsigned long flags;
	u32 mask = fifo->base.nr - 1;	/* channel-id mask (nr is a power of two) */
	u32 data = chan->ramfc;		/* byte offset of this channel's RAMFC image */
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
	if (chid == chan->base.chid) {
		/* stop pusher/puller before touching the context */
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* walk the ramfc layout table: read each live register field
		 * and fold it into the channel's RAMFC image (rm/cm are the
		 * field masks at the register and context bit positions)
		 */
		c = fifo->ramfc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		/* zero the saved registers to present a null context */
		c = fifo->ramfc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		/* reset CACHE1 and point PUSH1 at the (invalid) last chid */
		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
  109. void
  110. nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
  111. {
  112. struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
  113. struct nv04_fifo *fifo = chan->fifo;
  114. struct nvkm_device *device = fifo->base.engine.subdev.device;
  115. u32 mask = 1 << chan->base.chid;
  116. unsigned long flags;
  117. spin_lock_irqsave(&fifo->base.lock, flags);
  118. nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
  119. spin_unlock_irqrestore(&fifo->base.lock, flags);
  120. }
  121. void *
  122. nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
  123. {
  124. struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
  125. struct nv04_fifo *fifo = chan->fifo;
  126. struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
  127. const struct nv04_fifo_ramfc *c = fifo->ramfc;
  128. nvkm_kmap(imem->ramfc);
  129. do {
  130. nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
  131. } while ((++c)->bits);
  132. nvkm_done(imem->ramfc);
  133. return chan;
  134. }
/* Channel method table for NV04-era DMA channels; shared with the
 * derived implementations that include channv04.h.
 */
const struct nvkm_fifo_chan_func
nv04_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.object_ctor = nv04_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};
/* Constructor for an NV03_CHANNEL_DMA object: unpack and validate the
 * userspace v0 args, allocate and construct the channel, then fill in
 * its 32-byte RAMFC context image.
 *
 * NOTE(review): *pobject is set before nvkm_fifo_chan_ctor() so that,
 * on failure, the core can tear the partially-constructed object down
 * through the normal dtor path — do not reorder.
 */
static int
nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		/* a pushbuf object is mandatory for a DMA channel */
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	/* 0x1000-byte/-aligned instance, engines reachable from NV04 */
	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_SW),
				  0, 0x800000, 0x10000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	/* report the assigned channel id back to userspace */
	args->v0.chid = chan->base.chid;
	chan->ramfc = chan->base.chid * 32;	/* 32 bytes of RAMFC per channel */

	/* initialise the channel's RAMFC: DMA put/get at the requested
	 * offset, instance address of the pushbuf, and fetch parameters
	 */
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}
/* Userspace-visible channel class: NV03_CHANNEL_DMA, version 0 only. */
const struct nvkm_fifo_chan_oclass
nv04_fifo_dma_oclass = {
	.base.oclass = NV03_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv04_fifo_dma_new,
};