/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv40.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
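
/*
 * Report the graphics unit configuration.  Register 0x1540 holds the
 * unit enable mask on NV40-family chips; its low byte is also
 * consulted by nv40_gr_init() below.
 */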
u64
nv40_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/
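
/*
 * Write the 20-byte instance header for a software object: the class
 * id at 0x00, the remaining words zeroed, and (on big-endian builds)
 * the endian flag set at 0x08.
 */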
static int
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv40_gr_object = {
	.bind = nv40_gr_object_bind,
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/
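
/*
 * Allocate instance memory for the channel's context image, fill it
 * with the default context generated by the grctx code, and stamp the
 * context's own instance address (in 16-byte units) into its first
 * word.
 */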
static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		chan->inst = (*pgpuobj)->addr;
		nvkm_kmap(*pgpuobj);
		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
		nvkm_done(*pgpuobj);
	}
	return ret;
}
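
/*
 * Detach the channel's context from PGRAPH.  If it is the context
 * currently bound at 0x40032c and we're suspending, request a context
 * save and wait for the ctxprog to go idle (warning on timeout), then
 * drop the valid bit from both context pointer registers.
 */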
static int
nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = 0x01000000 | chan->inst >> 4;
	int ret = 0;

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);

	if (nvkm_rd32(device, 0x40032c) == inst) {
		if (suspend) {
			nvkm_wr32(device, 0x400720, 0x00000000);
			nvkm_wr32(device, 0x400784, inst);
			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
			if (nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
					break;
			) < 0) {
				u32 insn = nvkm_rd32(device, 0x400308);
				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
				ret = -EBUSY;
			}
		}

		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nvkm_rd32(device, 0x400330) == inst)
		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
	return ret;
}
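
/* Unlink the channel from the engine's channel list before freeing. */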
static void *
nv40_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	unsigned long flags;

	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_del(&chan->head);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv40_gr_chan = {
	.dtor = nv40_gr_chan_dtor,
	.fini = nv40_gr_chan_fini,
	.bind = nv40_gr_chan_bind,
};
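
/*
 * Construct a PGRAPH channel context and link it into the engine's
 * channel list; the engine lock is held because nv40_gr_intr() walks
 * the same list.
 */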
int
nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *chan;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->fifo = fifoch;
	*pobject = &chan->object;

	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
	list_add(&chan->head, &gr->chan);
	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
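
/*
 * Program one tiling region into PGRAPH.  The FIFO is paused and the
 * engine idled first so the tile registers can be rewritten safely;
 * the register layout (and zcomp handling) differs per chipset.
 */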
static void
nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	unsigned long flags;

	nvkm_fifo_pause(fifo, &flags);
	nv04_gr_idle(&gr->base);

	switch (device->chipset) {
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		switch (device->chipset) {
		case 0x40:
		case 0x45:
			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		case 0x41:
		case 0x42:
		case 0x43:
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
			break;
		default:
			break;
		}
		break;
	case 0x47:
	case 0x49:
	case 0x4b:
		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
		break;
	default:
		WARN_ON(1);
		break;
	}

	nvkm_fifo_start(fifo, &flags);
}
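
/*
 * PGRAPH interrupt handler: decode the trapped method, find the
 * owning channel by instance address (moving it to the front of the
 * channel list as a most-recently-used hint), acknowledge the
 * interrupt, and log the decoded state.
 */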
void
nv40_gr_intr(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nv40_gr_chan *temp, *chan = NULL;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	unsigned long flags;

	spin_lock_irqsave(&gr->base.engine.lock, flags);
	list_for_each_entry(temp, &gr->chan, head) {
		if (temp->inst >> 4 == inst) {
			chan = temp;
			list_del(&chan->head);
			list_add(&chan->head, &gr->chan);
			break;
		}
	}

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nvkm_mask(device, 0x402000, 0, 0);
		}
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta,
			   chan ? chan->fifo->chid : -1, inst << 4,
			   chan ? chan->fifo->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}
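
/*
 * Engine initialisation: generate and upload the context program,
 * reset interrupt and debug state, then apply per-chipset setup,
 * mirroring the framebuffer's tiling/RAM configuration
 * (0x100200/0x100204) into PGRAPH's copies.
 */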
int
nv40_gr_init(struct nvkm_gr *base)
{
	struct nv40_gr *gr = nv40_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, i, j;
	u32 vramsz;

	/* generate and upload context program */
	ret = nv40_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	/* No context present currently */
	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nvkm_rd32(device, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nvkm_wr32(device, 0x405000, i);
	}

	if (device->chipset == 0x40) {
		nvkm_wr32(device, 0x4009b0, 0x83280fff);
		nvkm_wr32(device, 0x4009b4, 0x000000a0);
	} else {
		nvkm_wr32(device, 0x400820, 0x83280eff);
		nvkm_wr32(device, 0x400824, 0x000000a0);
	}

	switch (device->chipset) {
	case 0x40:
	case 0x45:
		nvkm_wr32(device, 0x4009b8, 0x0078e366);
		nvkm_wr32(device, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nvkm_wr32(device, 0x400828, 0x007596ff);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nvkm_wr32(device, 0x400828, 0x0072cb77);
		nvkm_wr32(device, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nvkm_wr32(device, 0x400860, 0);
		nvkm_wr32(device, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nvkm_wr32(device, 0x400828, 0x07830610);
		nvkm_wr32(device, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nvkm_wr32(device, 0x400b38, 0x2ffff800);
	nvkm_wr32(device, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (device->chipset) {
	case 0x44:
	case 0x4a:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nvkm_wr32(device, 0x400bc4, 0x0000e024);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nvkm_wr32(device, 0x400bc4, 0x1003d888);
		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* begin RAM config */
	vramsz = device->func->resource_size(device, 1) - 1;
	switch (device->chipset) {
	case 0x40:
		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400820, 0);
		nvkm_wr32(device, 0x400824, 0);
		nvkm_wr32(device, 0x400864, vramsz);
		nvkm_wr32(device, 0x400868, vramsz);
		break;
	default:
		switch (device->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
			break;
		default:
			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
			break;
		}
		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
		nvkm_wr32(device, 0x400840, 0);
		nvkm_wr32(device, 0x400844, 0);
		nvkm_wr32(device, 0x4008A0, vramsz);
		nvkm_wr32(device, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
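
/*
 * Common constructor shared by the NV40-family variants, which each
 * pass in their own nvkm_gr_func table.
 */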
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, struct nvkm_gr **pgr)
{
	struct nv40_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;
	INIT_LIST_HEAD(&gr->chan);

	return nvkm_gr_ctor(func, device, index, true, &gr->base);
}

static const struct nvkm_gr_func
nv40_gr = {
	.init = nv40_gr_init,
	.intr = nv40_gr_intr,
	.tile = nv40_gr_tile,
	.units = nv40_gr_units,
	.chan_new = nv40_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
		{}
	}
};

int
nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv40_gr_new_(&nv40_gr, device, index, pgr);
}