/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv40.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <engine/fifo.h>
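
/* The ->units hook: returns the raw contents of register 0x1540, which
 * appears to describe which graphics units are enabled on this chipset
 * (nv40_gr_init() below consults the low bits of the same register).
 */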
u64
nv40_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/
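/* Instance memory representation of the graphics objects in the sclass
 * table below: a 20-byte block whose first word holds the class id, with
 * an extra flag set at offset 0x08 on big-endian hosts.
 */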
static int
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv40_gr_object = {
	.bind = nv40_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		chan->inst = (*pgpuobj)->addr;
		nvkm_kmap(*pgpuobj);
		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
		nvkm_done(*pgpuobj);
	}
	return ret;
}
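
/* Channel fini: with PGRAPH fifo access held off via 0x400720, the context
 * is (on suspend) presumably saved back to memory by the ctxprog before
 * the context registers 0x40032c / 0x400330 have their load bit cleared,
 * if they still point at this channel.
 */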
static int
nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = 0x01000000 | chan->inst >> 4;
	int ret = 0;

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

	if (nvkm_rd32(device, 0x40032c) == inst) {
		if (suspend) {
			nvkm_wr32(device, 0x400720, 0x00000000);
			nvkm_wr32(device, 0x400784, inst);
			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
			if (nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
					break;
			) < 0) {
				u32 insn = nvkm_rd32(device, 0x400308);
				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
				ret = -EBUSY;
			}
		}

		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nvkm_rd32(device, 0x400330) == inst)
		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
	return ret;
}

static void *
nv40_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	unsigned long flags;

	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_del(&chan->head);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv40_gr_chan = {
	.dtor = nv40_gr_chan_dtor,
	.fini = nv40_gr_chan_fini,
	.bind = nv40_gr_chan_bind,
};
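
/* Create per-channel PGRAPH state and link it into the engine's channel
 * list; nv40_gr_intr() walks this list to map a trapping context back to
 * its channel.
 */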
int
nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *chan;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->fifo = fifoch;
	*pobject = &chan->object;

	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_add(&chan->head, &gr->chan);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
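/* Update one tiling region: the fifo is paused and PGRAPH idled before the
 * per-chipset tile address, limit, pitch and zcomp registers are rewritten.
 */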
static void
nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	unsigned long flags;

	nvkm_fifo_pause(fifo, &flags);
	nv04_gr_idle(&gr->base);

	switch (device->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (device->chipset) {
		case 0x40:
		case 0x45:
			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	case 0x47:
	case 0x49:
	case 0x4b:
		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
		break;
	default:
		WARN_ON(1);
		break;
	}

	nvkm_fifo_start(fifo, &flags);
}
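
/* PGRAPH interrupt handler: match the trapping context against the channel
 * list (moving the hit to the front), ack the interrupt, re-enable fifo
 * access, and log a decoded description of the error.
 */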
void
nv40_gr_intr(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *temp, *chan = NULL;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	unsigned long flags;

	spin_lock_irqsave(&gr->base.engine.lock, flags);
	list_for_each_entry(temp, &gr->chan, head) {
		if (temp->inst >> 4 == inst) {
			chan = temp;
			list_del(&chan->head);
			list_add(&chan->head, &gr->chan);
			break;
		}
	}

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nvkm_mask(device, 0x402000, 0, 0);
		}
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta,
			   chan ? chan->fifo->chid : -1, inst << 4,
			   chan ? chan->fifo->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}
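
/* One-time PGRAPH setup: generate the context program, reset interrupt and
 * debug state, then apply per-chipset setup, tiling defaults and the VRAM
 * aperture configuration.
 */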
int
nv40_gr_init(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i, j;
	u32 vramsz;

	/* generate and upload context program */
	ret = nv40_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	/* No context present currently */
	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nvkm_rd32(device, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nvkm_wr32(device, 0x405000, i);
	}

	if (device->chipset == 0x40) {
		nvkm_wr32(device, 0x4009b0, 0x83280fff);
		nvkm_wr32(device, 0x4009b4, 0x000000a0);
	} else {
		nvkm_wr32(device, 0x400820, 0x83280eff);
		nvkm_wr32(device, 0x400824, 0x000000a0);
	}

	switch (device->chipset) {
	case 0x40:
	case 0x45:
		nvkm_wr32(device, 0x4009b8, 0x0078e366);
		nvkm_wr32(device, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nvkm_wr32(device, 0x400828, 0x007596ff);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nvkm_wr32(device, 0x400828, 0x0072cb77);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nvkm_wr32(device, 0x400860, 0);
		nvkm_wr32(device, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nvkm_wr32(device, 0x400828, 0x07830610);
		nvkm_wr32(device, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nvkm_wr32(device, 0x400b38, 0x2ffff800);
	nvkm_wr32(device, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (device->chipset) {
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nvkm_wr32(device, 0x400bc4, 0x0000e024);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* begin RAM config */
	vramsz = device->func->resource_size(device, 1) - 1;
	switch (device->chipset) {
	case 0x40:
		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400820, 0);
		nvkm_wr32(device, 0x400824, 0);
		nvkm_wr32(device, 0x400864, vramsz);
		nvkm_wr32(device, 0x400868, vramsz);
		break;
	default:
		switch (device->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
			break;
		default:
			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
			break;
		}
		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400840, 0);
		nvkm_wr32(device, 0x400844, 0);
		nvkm_wr32(device, 0x4008A0, vramsz);
		nvkm_wr32(device, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
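
/* Constructor shared by the NV40-family PGRAPH variants; 0x00001000 is
 * understood to be PGRAPH's bit in the master (PMC) enable register.
 */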
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, struct nvkm_gr **pgr)
{
	struct nv40_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;
	INIT_LIST_HEAD(&gr->chan);

	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
}
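
/* Hooks and object classes for vanilla NV40 PGRAPH; the sclass table
 * exposes the 2D classes carried over from earlier chips plus the NV40 3D
 * class (0x4097, curie).
 */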
static const struct nvkm_gr_func
nv40_gr = {
	.init = nv40_gr_init,
	.intr = nv40_gr_intr,
	.tile = nv40_gr_tile,
	.units = nv40_gr_units,
	.chan_new = nv40_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
		{}
	}
};

int
nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv40_gr_new_(&nv40_gr, device, index, pgr);
}