chan.c

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

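/* Proxy state for an object created on a FIFO channel: remembers the owning
 * channel and the hash handle returned by the channel's object_ctor hook, so
 * the object can be unregistered again when it is destroyed.
 */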
struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

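/* Called when an object using an engine on this channel is finalized.  Once
 * the last user is gone, detach the engine from the channel via the
 * engine_fini hook and halt the shared per-channel context object.
 */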
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

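/* Called when an object using an engine on this channel is initialized.  For
 * the first user, bring up the shared per-channel context object and attach
 * the engine to the channel via the engine_init hook.
 */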
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

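/* Destroy a channel object: remove it from the channel's object hash, and,
 * once no objects reference the engine any more, destroy the per-channel
 * engine context and drop the VM's engine reference.
 */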
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vm)
			atomic_dec(&chan->vm->engref[engine->subdev.index]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

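/* Construct an object on a FIFO channel.  The first object for a given
 * engine also instantiates that engine's per-channel context (cclass) and
 * binds it to the channel; the real object is then constructed with the
 * context as its parent and wrapped in the oproxy above, so that init/fini/
 * dtor on the object can manage the context's lifetime.
 */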
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vm)
			atomic_inc(&chan->vm->engref[engine->subdev.index]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

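/* Enumerate the object classes (sclass) available on this channel by walking
 * the engines enabled in chan->engines, so that a client can discover the
 * index'th class it is allowed to instantiate.
 */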
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_engine *engine;
	u64 mask = chan->engines;
	int ret, i, c;

	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
		if (!(engine = nvkm_device_engine(device, i)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

static int
nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

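/* Accessors for the channel's user register window.  The window is
 * ioremap()ed lazily on first access, and every read/write is bounds-checked
 * against the window size.
 */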
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	*data = ioread32_native(chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	iowrite32_native(data, chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

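/* Destroy the channel: release its channel id back to the FIFO, unmap the
 * user register window, and drop the VM, push buffer, and instance memory
 * references taken by the constructor.
 */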
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->user)
		iounmap(chan->user);

	nvkm_vm_ref(NULL, &chan->vm, NULL);

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.rd32 = nvkm_fifo_chan_rd32,
	.wr32 = nvkm_fifo_chan_wr32,
	.sclass = nvkm_fifo_chan_child_get,
};

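/* Base constructor shared by the per-generation channel implementations:
 * allocates instance memory, binds the push buffer ctxdma into it, takes a
 * reference on the client's address space, allocates a channel id, and
 * calculates the BAR address of the channel's user registers.
 */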
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_mmu *mmu = device->mmu;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engines = engines;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (!vm && mmu) {
		if (!client->vm || client->vm->mmu == mmu) {
			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
	} else {
		return -ENOENT;
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;

	nvkm_fifo_cevent(fifo);
	return 0;
}