@@ -41,10 +41,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 	u32 base = (pte << 2) & ~0x0000000f;
 	u32 tmp[4];
 
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
+	tmp[0] = nvkm_ro32(pgt, base + 0x0);
+	tmp[1] = nvkm_ro32(pgt, base + 0x4);
+	tmp[2] = nvkm_ro32(pgt, base + 0x8);
+	tmp[3] = nvkm_ro32(pgt, base + 0xc);
 
 	while (cnt--) {
 		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -74,10 +74,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 		}
 	}
 
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+	nvkm_wo32(pgt, base + 0x0, tmp[0]);
+	nvkm_wo32(pgt, base + 0x4, tmp[1]);
+	nvkm_wo32(pgt, base + 0x8, tmp[2]);
+	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
 }
 
 static void
@@ -88,6 +88,7 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	u32 tmp[4];
 	int i;
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32 max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
@@ -100,15 +101,16 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	while (cnt >= 4) {
 		for (i = 0; i < 4; i++)
 			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
-		nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
-		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+		nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
 		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
@@ -116,6 +118,7 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32 max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
@@ -125,15 +128,16 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 	}
 
 	while (cnt >= 4) {
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
 		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void