nv31.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv31.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

#include <nvif/class.h>

/*******************************************************************************
 * MPEG object classes
 ******************************************************************************/
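
/* Write a 16-byte engine object into instance memory; the first word holds
 * the object class and the remaining words are zeroed.
 */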
static int
nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		      int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv31_mpeg_object = {
	.bind = nv31_mpeg_object_bind,
};

/*******************************************************************************
 * PMPEG context
 ******************************************************************************/

static void *
nv31_mpeg_chan_dtor(struct nvkm_object *object)
{
	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
	struct nv31_mpeg *mpeg = chan->mpeg;
	unsigned long flags;

	spin_lock_irqsave(&mpeg->engine.lock, flags);
	if (mpeg->chan == chan)
		mpeg->chan = NULL;
	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv31_mpeg_chan = {
	.dtor = nv31_mpeg_chan_dtor,
};
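
/* PMPEG has only a single context: the first channel to create one claims
 * it, and any further attempt fails with -EBUSY until that channel's
 * context is destroyed.
 */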
int
nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
		   const struct nvkm_oclass *oclass,
		   struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
	struct nv31_mpeg_chan *chan;
	unsigned long flags;
	int ret = -EBUSY;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
	chan->mpeg = mpeg;
	chan->fifo = fifoch;
	*pobject = &chan->object;

	spin_lock_irqsave(&mpeg->engine.lock, flags);
	if (!mpeg->chan) {
		mpeg->chan = chan;
		ret = 0;
	}
	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
	return ret;
}

/*******************************************************************************
 * PMPEG engine/subdev functions
 ******************************************************************************/

void
nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
	struct nvkm_device *device = mpeg->engine.subdev.device;

	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
}
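
/* Software handling of the DMA_CMD/DMA_DATA/DMA_IMAGE methods: read the
 * referenced DMA object from instance memory, reject anything that isn't a
 * linear object, and program its base and size into the matching PMPEG
 * registers.
 */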
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
	u32 inst = data << 4;
	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return false;

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nvkm_mask(device, 0x00b300, 0x00010000,
			  (dma0 & 0x00030000) ? 0x00010000 : 0);
		nvkm_wr32(device, 0x00b334, base);
		nvkm_wr32(device, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nvkm_mask(device, 0x00b300, 0x00020000,
			  (dma0 & 0x00030000) ? 0x00020000 : 0);
		nvkm_wr32(device, 0x00b360, base);
		nvkm_wr32(device, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x00030000)
			return false;

		nvkm_wr32(device, 0x00b370, base);
		nvkm_wr32(device, 0x00b374, size);
	}

	return true;
}
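
/* Dispatch software methods raised via PMPEG interrupts; only the three
 * DMA object methods are handled here.
 */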
static bool
nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
{
	struct nvkm_device *device = mpeg->engine.subdev.device;

	switch (mthd) {
	case 0x190:
	case 0x1a0:
	case 0x1b0:
		return mpeg->func->mthd_dma(device, mthd, data);
	default:
		break;
	}

	return false;
}
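
/* Interrupt handler: decode the pending event, give software methods a
 * chance to run, then acknowledge the interrupt and log anything that is
 * still flagged.
 */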
static void
nv31_mpeg_intr(struct nvkm_engine *engine)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00b100);
	u32 type = nvkm_rd32(device, 0x00b230);
	u32 mthd = nvkm_rd32(device, 0x00b234);
	u32 data = nvkm_rd32(device, 0x00b238);
	u32 show = stat;
	unsigned long flags;

	spin_lock_irqsave(&mpeg->engine.lock, flags);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010) {
			if (!nv31_mpeg_mthd(mpeg, mthd, data))
				show &= ~0x01000000;
		}
	}

	nvkm_wr32(device, 0x00b100, stat);
	nvkm_wr32(device, 0x00b230, 0x00000001);

	if (show) {
		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
			   mpeg->chan ? mpeg->chan->fifo->chid : -1,
			   mpeg->chan ? mpeg->chan->object.client->name :
			   "unknown", stat, type, mthd, data);
	}

	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
}
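
/* Bring up the VPE/PMPEG units, enable interrupts, and wait for the engine
 * to report idle before declaring it usable; returns -EBUSY on timeout.
 */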
int
nv31_mpeg_init(struct nvkm_engine *mpeg)
{
	struct nvkm_subdev *subdev = &mpeg->subdev;
	struct nvkm_device *device = subdev->device;

	/* VPE init */
	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* PMPEG init */
	nvkm_wr32(device, 0x00b32c, 0x00000000);
	nvkm_wr32(device, 0x00b314, 0x00000100);
	nvkm_wr32(device, 0x00b220, 0x00000031);
	nvkm_wr32(device, 0x00b300, 0x02001ec1);
	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

	nvkm_wr32(device, 0x00b100, 0xffffffff);
	nvkm_wr32(device, 0x00b140, 0xffffffff);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
			break;
	) < 0) {
		nvkm_error(subdev, "timeout %08x\n",
			   nvkm_rd32(device, 0x00b200));
		return -EBUSY;
	}

	return 0;
}

static void *
nv31_mpeg_dtor(struct nvkm_engine *engine)
{
	return nv31_mpeg(engine);
}

static const struct nvkm_engine_func
nv31_mpeg_ = {
	.dtor = nv31_mpeg_dtor,
	.init = nv31_mpeg_init,
	.intr = nv31_mpeg_intr,
	.tile = nv31_mpeg_tile,
	.fifo.cclass = nv31_mpeg_chan_new,
	.sclass = {
		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
		{}
	}
};
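
/* Common constructor: stash the chip-specific function table and register
 * the engine with the core.
 */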
int
nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
	       int index, struct nvkm_engine **pmpeg)
{
	struct nv31_mpeg *mpeg;

	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
		return -ENOMEM;
	mpeg->func = func;
	*pmpeg = &mpeg->engine;

	return nvkm_engine_ctor(&nv31_mpeg_, device, index,
				true, &mpeg->engine);
}

static const struct nv31_mpeg_func
nv31_mpeg = {
	.mthd_dma = nv31_mpeg_mthd_dma,
};

int
nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
	return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
}