@@ -1264,85 +1264,87 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_mc_unk260(device, 1);
 }
 
+#define CB_RESERVED 0x80000
+
 int
 gf100_grctx_generate(struct gf100_gr *gr)
 {
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_memory *chan;
+	struct nvkm_memory *inst = NULL;
+	struct nvkm_memory *data = NULL;
+	struct nvkm_vmm *vmm = NULL;
+	struct nvkm_vma *ctx = NULL;
 	struct gf100_grctx info;
 	int ret, i;
 	u64 addr;
 
-	/* allocate memory to for a "channel", which we'll use to generate
-	 * the default context values
+	/* Allocate memory for a "channel", which we'll use to generate
+	 * the default context values.
 	 */
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
-			      0x1000, true, &chan);
-	if (ret) {
-		nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
-		return ret;
-	}
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      0x1000, 0x1000, true, &inst);
+	if (ret)
+		goto done;
 
-	addr = nvkm_memory_addr(chan);
+	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "grctx", &vmm);
+	if (ret)
+		goto done;
 
-	/* PGD pointer */
-	nvkm_kmap(chan);
-	nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
-	nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
-	nvkm_wo32(chan, 0x0208, 0xffffffff);
-	nvkm_wo32(chan, 0x020c, 0x000000ff);
+	vmm->debug = subdev->debug;
 
-	/* PGT[0] pointer */
-	nvkm_wo32(chan, 0x1000, 0x00000000);
-	nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
+	ret = nvkm_vmm_join(vmm, inst);
+	if (ret)
+		goto done;
 
-	/* identity-map the whole "channel" into its own vm */
-	for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
-		u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
-		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
-	}
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      CB_RESERVED + gr->size, 0, true, &data);
+	if (ret)
+		goto done;
 
-	/* context pointer (virt) */
-	nvkm_wo32(chan, 0x0210, 0x00080004);
-	nvkm_wo32(chan, 0x0214, 0x00000000);
-	nvkm_done(chan);
+	ret = nvkm_vmm_get(vmm, 0, nvkm_memory_size(data), &ctx);
+	if (ret)
+		goto done;
 
-	nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
-	nvkm_wr32(device, 0x100cbc, 0x80000001);
-	nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
-			break;
-	);
+	ret = nvkm_memory_map(data, 0, vmm, ctx, NULL, 0);
+	if (ret)
+		goto done;
+
+
+	/* Setup context pointer. */
+	nvkm_kmap(inst);
+	nvkm_wo32(inst, 0x0210, lower_32_bits(ctx->addr + CB_RESERVED) | 4);
+	nvkm_wo32(inst, 0x0214, upper_32_bits(ctx->addr + CB_RESERVED));
+	nvkm_done(inst);
 
-	/* setup default state for mmio list construction */
+	/* Setup default state for mmio list construction. */
 	info.gr = gr;
 	info.data = gr->mmio_data;
 	info.mmio = gr->mmio_list;
-	info.addr = 0x2000 + (i * 8);
+	info.addr = ctx->addr;
 	info.buffer_nr = 0;
 
-	/* make channel current */
+	/* Make channel current. */
+	addr = nvkm_memory_addr(inst) >> 12;
 	if (gr->firmware) {
 		nvkm_wr32(device, 0x409840, 0x00000030);
-		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr);
 		nvkm_wr32(device, 0x409504, 0x00000003);
 		nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800) & 0x00000010)
 				break;
 		);
 
-		nvkm_kmap(chan);
-		nvkm_wo32(chan, 0x8001c, 1);
-		nvkm_wo32(chan, 0x80020, 0);
-		nvkm_wo32(chan, 0x80028, 0);
-		nvkm_wo32(chan, 0x8002c, 0);
-		nvkm_done(chan);
+		nvkm_kmap(data);
+		nvkm_wo32(data, 0x1c, 1);
+		nvkm_wo32(data, 0x20, 0);
+		nvkm_wo32(data, 0x28, 0);
+		nvkm_wo32(data, 0x2c, 0);
+		nvkm_done(data);
 	} else {
 		nvkm_wr32(device, 0x409840, 0x80000000);
-		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr);
 		nvkm_wr32(device, 0x409504, 0x00000001);
 		nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800) & 0x80000000)
@@ -1352,8 +1354,8 @@ gf100_grctx_generate(struct gf100_gr *gr)
 
 	grctx->main(gr, &info);
 
-	/* trigger a context unload by unsetting the "next channel valid" bit
-	 * and faking a context switch interrupt
+	/* Trigger a context unload by unsetting the "next channel valid" bit
+	 * and faking a context switch interrupt.
 	 */
 	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
 	nvkm_wr32(device, 0x409000, 0x00000100);
@@ -1367,17 +1369,21 @@ gf100_grctx_generate(struct gf100_gr *gr)
 
 	gr->data = kmalloc(gr->size, GFP_KERNEL);
 	if (gr->data) {
-		nvkm_kmap(chan);
+		nvkm_kmap(data);
 		for (i = 0; i < gr->size; i += 4)
-			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
-		nvkm_done(chan);
+			gr->data[i / 4] = nvkm_ro32(data, CB_RESERVED + i);
+		nvkm_done(data);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
 	}
 
 done:
-	nvkm_memory_unref(&chan);
+	nvkm_vmm_put(vmm, &ctx);
+	nvkm_memory_unref(&data);
+	nvkm_vmm_part(vmm, inst);
+	nvkm_vmm_unref(&vmm);
+	nvkm_memory_unref(&inst);
 	return ret;
 }
 
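Worth noting is the error-handling shape the function moves to: every handle (inst, data, vmm, ctx) starts out NULL, every failure branches to the single done: label, and the teardown there runs in reverse acquisition order, so a partially completed setup unwinds cleanly, relying on the release helpers tolerating handles that were never acquired. The standalone sketch below illustrates the same single-exit idiom in plain C; the acquire()/release() helpers are hypothetical stand-ins, not nouveau API.

#include <stdlib.h>

/* Hypothetical stand-ins for the nvkm constructors and unref/put calls. */
static void *acquire(void) { return malloc(1); }
static void release(void *p) { free(p); }	/* free(NULL) is a no-op, like the nvkm unrefs. */

static int
setup(void)
{
	void *inst = NULL, *vmm = NULL, *data = NULL, *ctx = NULL;
	int ret = -1;

	if (!(inst = acquire()))
		goto done;
	if (!(vmm = acquire()))
		goto done;
	if (!(data = acquire()))
		goto done;
	if (!(ctx = acquire()))
		goto done;
	ret = 0;	/* Success path would use the resources here. */
done:
	/* Reverse acquisition order; never-acquired handles are skipped. */
	release(ctx);
	release(data);
	release(vmm);
	release(inst);
	return ret;
}

int
main(void)
{
	return setup();
}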