
Merge branch 'linux-4.15' of git://github.com/skeggsb/linux into drm-next

- Pascal temperature sensor support
- Improved BAR2 handling, greatly reduces time required to suspend
- Rework of the MMU code
  - Allows us to properly support Pascal's new MMU layout (implemented)
  - Lays the groundwork for improved userspace APIs later
- Misc other fixes

* 'linux-4.15' of git://github.com/skeggsb/linux: (151 commits)
  drm/nouveau/gr/gf100-: don't prevent module load if firmware missing
  drm/nouveau/mmu: remove old vmm frontend
  drm/nouveau: improve selection of GPU page size
  drm/nouveau: switch over to new memory and vmm interfaces
  drm/nouveau: remove unused nouveau_fence_work()
  drm/nouveau: queue delayed unmapping of VMAs on client workqueue
  drm/nouveau: implement per-client delayed workqueue with fence support
  drm/nouveau: determine memory class for each client
  drm/nouveau: pass handle of vmm object to channel allocation ioctls
  drm/nouveau: switch to vmm limit
  drm/nouveau: allocate vmm object for every client
  drm/nouveau: replace use of cpu_coherent with memory types
  drm/nouveau: use nvif_mmu_type to determine BAR1 caching
  drm/nouveau: fetch memory type indices that we care about for ttm
  drm/nouveau: consolidate handling of dma mask
  drm/nouveau: check kind validity against mmu object
  drm/nouveau: allocate mmu object for every client
  drm/nouveau: remove trivial cases of nvxx_device() usage
  drm/nouveau/mmu: define user interfaces to mmu vmm opertaions
  drm/nouveau/mmu: define user interfaces to mmu memory allocation
  ...
Dave Airlie 8 years ago
parent
commit
2ef7a95fe5
100 changed files with 2487 additions and 964 deletions
  1. 2 0
      drivers/gpu/drm/nouveau/Kbuild
  2. 7 0
      drivers/gpu/drm/nouveau/Kconfig
  3. 1 1
      drivers/gpu/drm/nouveau/dispnv04/disp.c
  4. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cl506e.h
  5. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cl506f.h
  6. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cl826e.h
  7. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cl826f.h
  8. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cl906f.h
  9. 1 1
      drivers/gpu/drm/nouveau/include/nvif/cla06f.h
  10. 17 0
      drivers/gpu/drm/nouveau/include/nvif/class.h
  11. 0 5
      drivers/gpu/drm/nouveau/include/nvif/device.h
  12. 42 0
      drivers/gpu/drm/nouveau/include/nvif/if0008.h
  13. 22 0
      drivers/gpu/drm/nouveau/include/nvif/if000a.h
  14. 11 0
      drivers/gpu/drm/nouveau/include/nvif/if000b.h
  15. 64 0
      drivers/gpu/drm/nouveau/include/nvif/if000c.h
  16. 12 0
      drivers/gpu/drm/nouveau/include/nvif/if000d.h
  17. 25 0
      drivers/gpu/drm/nouveau/include/nvif/if500b.h
  18. 21 0
      drivers/gpu/drm/nouveau/include/nvif/if500d.h
  19. 23 0
      drivers/gpu/drm/nouveau/include/nvif/if900b.h
  20. 21 0
      drivers/gpu/drm/nouveau/include/nvif/if900d.h
  21. 27 0
      drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
  22. 21 0
      drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
  23. 7 3
      drivers/gpu/drm/nouveau/include/nvif/ioctl.h
  24. 18 0
      drivers/gpu/drm/nouveau/include/nvif/mem.h
  25. 56 0
      drivers/gpu/drm/nouveau/include/nvif/mmu.h
  26. 5 2
      drivers/gpu/drm/nouveau/include/nvif/object.h
  27. 0 14
      drivers/gpu/drm/nouveau/include/nvif/os.h
  28. 42 0
      drivers/gpu/drm/nouveau/include/nvif/vmm.h
  29. 2 1
      drivers/gpu/drm/nouveau/include/nvkm/core/client.h
  30. 1 1
      drivers/gpu/drm/nouveau/include/nvkm/core/device.h
  31. 1 0
      drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
  32. 6 8
      drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
  33. 56 8
      drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
  34. 31 1
      drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
  35. 12 29
      drivers/gpu/drm/nouveau/include/nvkm/core/object.h
  36. 31 0
      drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
  37. 19 0
      drivers/gpu/drm/nouveau/include/nvkm/core/os.h
  38. 1 0
      drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
  39. 1 1
      drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
  40. 1 0
      drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
  41. 3 2
      drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
  42. 2 1
      drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
  43. 7 2
      drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
  44. 10 26
      drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
  45. 1 0
      drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
  46. 2 5
      drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
  47. 101 39
      drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
  48. 1 0
      drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
  49. 23 19
      drivers/gpu/drm/nouveau/nouveau_abi16.c
  50. 1 1
      drivers/gpu/drm/nouveau/nouveau_abi16.h
  51. 5 1
      drivers/gpu/drm/nouveau/nouveau_bios.c
  52. 219 172
      drivers/gpu/drm/nouveau/nouveau_bo.c
  53. 7 10
      drivers/gpu/drm/nouveau/nouveau_bo.h
  54. 32 19
      drivers/gpu/drm/nouveau/nouveau_chan.c
  55. 2 1
      drivers/gpu/drm/nouveau/nouveau_chan.h
  56. 1 4
      drivers/gpu/drm/nouveau/nouveau_display.h
  57. 8 7
      drivers/gpu/drm/nouveau/nouveau_dma.c
  58. 1 1
      drivers/gpu/drm/nouveau/nouveau_dma.h
  59. 130 31
      drivers/gpu/drm/nouveau/nouveau_drm.c
  60. 29 4
      drivers/gpu/drm/nouveau/nouveau_drv.h
  61. 4 3
      drivers/gpu/drm/nouveau/nouveau_fbcon.c
  62. 0 58
      drivers/gpu/drm/nouveau/nouveau_fence.c
  63. 1 6
      drivers/gpu/drm/nouveau/nouveau_fence.h
  64. 64 59
      drivers/gpu/drm/nouveau/nouveau_gem.c
  65. 0 3
      drivers/gpu/drm/nouveau/nouveau_gem.h
  66. 198 0
      drivers/gpu/drm/nouveau/nouveau_mem.c
  67. 30 0
      drivers/gpu/drm/nouveau/nouveau_mem.h
  68. 23 31
      drivers/gpu/drm/nouveau/nouveau_sgdma.c
  69. 88 192
      drivers/gpu/drm/nouveau/nouveau_ttm.c
  70. 135 0
      drivers/gpu/drm/nouveau/nouveau_vmm.c
  71. 31 0
      drivers/gpu/drm/nouveau/nouveau_vmm.h
  72. 6 6
      drivers/gpu/drm/nouveau/nv50_display.c
  73. 5 4
      drivers/gpu/drm/nouveau/nv50_fbcon.c
  74. 5 38
      drivers/gpu/drm/nouveau/nv84_fence.c
  75. 5 4
      drivers/gpu/drm/nouveau/nvc0_fbcon.c
  76. 3 0
      drivers/gpu/drm/nouveau/nvif/Kbuild
  77. 88 0
      drivers/gpu/drm/nouveau/nvif/mem.c
  78. 117 0
      drivers/gpu/drm/nouveau/nvif/mmu.c
  79. 59 28
      drivers/gpu/drm/nouveau/nvif/object.c
  80. 167 0
      drivers/gpu/drm/nouveau/nvif/vmm.c
  81. 2 0
      drivers/gpu/drm/nouveau/nvkm/core/client.c
  82. 10 0
      drivers/gpu/drm/nouveau/nvkm/core/engine.c
  83. 25 22
      drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
  84. 11 4
      drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
  85. 94 5
      drivers/gpu/drm/nouveau/nvkm/core/memory.c
  86. 3 2
      drivers/gpu/drm/nouveau/nvkm/core/mm.c
  87. 12 2
      drivers/gpu/drm/nouveau/nvkm/core/object.c
  88. 11 2
      drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
  89. 1 0
      drivers/gpu/drm/nouveau/nvkm/core/ramht.c
  90. 52 46
      drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
  91. 1 1
      drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
  92. 12 12
      drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
  93. 2 4
      drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
  94. 8 1
      drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
  95. 3 1
      drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
  96. 1 0
      drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
  97. 1 1
      drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
  98. 3 3
      drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
  99. 1 1
      drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
  100. 8 0
      drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c

+ 2 - 0
drivers/gpu/drm/nouveau/Kbuild

@@ -30,9 +30,11 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
+nouveau-y += nouveau_vmm.o
 
 # DRM - modesetting
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o

+ 7 - 0
drivers/gpu/drm/nouveau/Kconfig

@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
 	help
 	  Selects the default debug level
 
+config NOUVEAU_DEBUG_MMU
+	bool "Enable additional MMU debugging"
+	depends on DRM_NOUVEAU
+	default n
+	help
+	  Say Y here if you want to enable verbose MMU debug output.
+
 config DRM_NOUVEAU_BACKLIGHT
 	bool "Support for backlight control"
 	depends on DRM_NOUVEAU

+ 1 - 1
drivers/gpu/drm/nouveau/dispnv04/disp.c

@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(&drm->client.device.object);
+	nvif_object_map(&drm->client.device.object, NULL, 0);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cl506e.h

@@ -5,7 +5,7 @@ struct nv50_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cl506f.h

@@ -8,6 +8,6 @@ struct nv50_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 #endif

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cl826e.h

@@ -5,7 +5,7 @@ struct g82_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cl826f.h

@@ -8,7 +8,7 @@ struct g82_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV826F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cl906f.h

@@ -7,7 +7,7 @@ struct fermi_channel_gpfifo_v0 {
 	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV906F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvif/cla06f.h

@@ -22,7 +22,7 @@ struct kepler_channel_gpfifo_a_v0 {
 	__u32 engines;
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00

+ 17 - 0
drivers/gpu/drm/nouveau/include/nvif/class.h

@@ -14,6 +14,23 @@
 #define NVIF_CLASS_SW_NV50                           /* if0005.h */ -0x00000006
 #define NVIF_CLASS_SW_GF100                          /* if0005.h */ -0x00000007
 
+#define NVIF_CLASS_MMU                               /* if0008.h */  0x80000008
+#define NVIF_CLASS_MMU_NV04                          /* if0008.h */  0x80000009
+#define NVIF_CLASS_MMU_NV50                          /* if0008.h */  0x80005009
+#define NVIF_CLASS_MMU_GF100                         /* if0008.h */  0x80009009
+
+#define NVIF_CLASS_MEM                               /* if000a.h */  0x8000000a
+#define NVIF_CLASS_MEM_NV04                          /* if000b.h */  0x8000000b
+#define NVIF_CLASS_MEM_NV50                          /* if500b.h */  0x8000500b
+#define NVIF_CLASS_MEM_GF100                         /* if900b.h */  0x8000900b
+
+#define NVIF_CLASS_VMM                               /* if000c.h */  0x8000000c
+#define NVIF_CLASS_VMM_NV04                          /* if000d.h */  0x8000000d
+#define NVIF_CLASS_VMM_NV50                          /* if500d.h */  0x8000500d
+#define NVIF_CLASS_VMM_GF100                         /* if900d.h */  0x8000900d
+#define NVIF_CLASS_VMM_GM200                         /* ifb00d.h */  0x8000b00d
+#define NVIF_CLASS_VMM_GP100                         /* ifc00d.h */  0x8000c00d
+
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
 #define NV_NULL_CLASS                                                0x00000030
 

+ 0 - 5
drivers/gpu/drm/nouveau/include/nvif/device.h

@@ -38,7 +38,6 @@ u64  nvif_device_time(struct nvif_device *);
 /*XXX*/
 #include <subdev/bios.h>
 #include <subdev/fb.h>
-#include <subdev/mmu.h>
 #include <subdev/bar.h>
 #include <subdev/gpio.h>
 #include <subdev/clk.h>
@@ -57,8 +56,6 @@ u64  nvif_device_time(struct nvif_device *);
 })
 #define nvxx_bios(a) nvxx_device(a)->bios
 #define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_mmu(a) nvxx_device(a)->mmu
-#define nvxx_bar(a) nvxx_device(a)->bar
 #define nvxx_gpio(a) nvxx_device(a)->gpio
 #define nvxx_clk(a) nvxx_device(a)->clk
 #define nvxx_i2c(a) nvxx_device(a)->i2c
@@ -66,10 +63,8 @@ u64  nvif_device_time(struct nvif_device *);
 #define nvxx_therm(a) nvxx_device(a)->therm
 #define nvxx_volt(a) nvxx_device(a)->volt
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
-#include <engine/sw.h>
 
 #define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvxx_device(a)->gr

+ 42 - 0
drivers/gpu/drm/nouveau/include/nvif/if0008.h

@@ -0,0 +1,42 @@
+#ifndef __NVIF_IF0008_H__
+#define __NVIF_IF0008_H__
+struct nvif_mmu_v0 {
+	__u8  version;
+	__u8  dmabits;
+	__u8  heap_nr;
+	__u8  type_nr;
+	__u16 kind_nr;
+};
+
+#define NVIF_MMU_V0_HEAP                                                   0x00
+#define NVIF_MMU_V0_TYPE                                                   0x01
+#define NVIF_MMU_V0_KIND                                                   0x02
+
+struct nvif_mmu_heap_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  pad02[6];
+	__u64 size;
+};
+
+struct nvif_mmu_type_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  heap;
+	__u8  vram;
+	__u8  host;
+	__u8  comp;
+	__u8  disp;
+	__u8  kind;
+	__u8  mappable;
+	__u8  coherent;
+	__u8  uncached;
+};
+
+struct nvif_mmu_kind_v0 {
+	__u8  version;
+	__u8  pad01[1];
+	__u16 count;
+	__u8  data[];
+};
+#endif

+ 22 - 0
drivers/gpu/drm/nouveau/include/nvif/if000a.h

@@ -0,0 +1,22 @@
+#ifndef __NVIF_IF000A_H__
+#define __NVIF_IF000A_H__
+struct nvif_mem_v0 {
+	__u8  version;
+	__u8  type;
+	__u8  page;
+	__u8  pad03[5];
+	__u64 size;
+	__u64 addr;
+	__u8  data[];
+};
+
+struct nvif_mem_ram_vn {
+};
+
+struct nvif_mem_ram_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	dma_addr_t *dma;
+	struct scatterlist *sgl;
+};
+#endif

+ 11 - 0
drivers/gpu/drm/nouveau/include/nvif/if000b.h

@@ -0,0 +1,11 @@
+#ifndef __NVIF_IF000B_H__
+#define __NVIF_IF000B_H__
+#include "if000a.h"
+
+struct nv04_mem_vn {
+	/* nvkm_mem_vX ... */
+};
+
+struct nv04_mem_map_vn {
+};
+#endif

+ 64 - 0
drivers/gpu/drm/nouveau/include/nvif/if000c.h

@@ -0,0 +1,64 @@
+#ifndef __NVIF_IF000C_H__
+#define __NVIF_IF000C_H__
+struct nvif_vmm_v0 {
+	__u8  version;
+	__u8  page_nr;
+	__u8  pad02[6];
+	__u64 addr;
+	__u64 size;
+	__u8  data[];
+};
+
+#define NVIF_VMM_V0_PAGE                                                   0x00
+#define NVIF_VMM_V0_GET                                                    0x01
+#define NVIF_VMM_V0_PUT                                                    0x02
+#define NVIF_VMM_V0_MAP                                                    0x03
+#define NVIF_VMM_V0_UNMAP                                                  0x04
+
+struct nvif_vmm_page_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  shift;
+	__u8  sparse;
+	__u8  vram;
+	__u8  host;
+	__u8  comp;
+	__u8  pad07[1];
+};
+
+struct nvif_vmm_get_v0 {
+	__u8  version;
+#define NVIF_VMM_GET_V0_ADDR                                               0x00
+#define NVIF_VMM_GET_V0_PTES                                               0x01
+#define NVIF_VMM_GET_V0_LAZY	                                           0x02
+	__u8  type;
+	__u8  sparse;
+	__u8  page;
+	__u8  align;
+	__u8  pad05[3];
+	__u64 size;
+	__u64 addr;
+};
+
+struct nvif_vmm_put_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+};
+
+struct nvif_vmm_map_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+	__u64 size;
+	__u64 memory;
+	__u64 offset;
+	__u8  data[];
+};
+
+struct nvif_vmm_unmap_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 addr;
+};
+#endif

+ 12 - 0
drivers/gpu/drm/nouveau/include/nvif/if000d.h

@@ -0,0 +1,12 @@
+#ifndef __NVIF_IF000D_H__
+#define __NVIF_IF000D_H__
+#include "if000c.h"
+
+struct nv04_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct nv04_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+#endif

+ 25 - 0
drivers/gpu/drm/nouveau/include/nvif/if500b.h

@@ -0,0 +1,25 @@
+#ifndef __NVIF_IF500B_H__
+#define __NVIF_IF500B_H__
+#include "if000a.h"
+
+struct nv50_mem_vn {
+	/* nvif_mem_vX ... */
+};
+
+struct nv50_mem_v0 {
+	/* nvif_mem_vX ... */
+	__u8  version;
+	__u8  bankswz;
+	__u8  contig;
+};
+
+struct nv50_mem_map_vn {
+};
+
+struct nv50_mem_map_v0 {
+	__u8  version;
+	__u8  ro;
+	__u8  kind;
+	__u8  comp;
+};
+#endif

+ 21 - 0
drivers/gpu/drm/nouveau/include/nvif/if500d.h

@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF500D_H__
+#define __NVIF_IF500D_H__
+#include "if000c.h"
+
+struct nv50_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct nv50_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct nv50_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+	__u8  comp;
+};
+#endif

+ 23 - 0
drivers/gpu/drm/nouveau/include/nvif/if900b.h

@@ -0,0 +1,23 @@
+#ifndef __NVIF_IF900B_H__
+#define __NVIF_IF900B_H__
+#include "if000a.h"
+
+struct gf100_mem_vn {
+	/* nvif_mem_vX ... */
+};
+
+struct gf100_mem_v0 {
+	/* nvif_mem_vX ... */
+	__u8  version;
+	__u8  contig;
+};
+
+struct gf100_mem_map_vn {
+};
+
+struct gf100_mem_map_v0 {
+	__u8  version;
+	__u8  ro;
+	__u8  kind;
+};
+#endif

+ 21 - 0
drivers/gpu/drm/nouveau/include/nvif/if900d.h

@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF900D_H__
+#define __NVIF_IF900D_H__
+#include "if000c.h"
+
+struct gf100_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gf100_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gf100_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif

+ 27 - 0
drivers/gpu/drm/nouveau/include/nvif/ifb00d.h

@@ -0,0 +1,27 @@
+#ifndef __NVIF_IFB00D_H__
+#define __NVIF_IFB00D_H__
+#include "if000c.h"
+
+struct gm200_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gm200_vmm_v0 {
+	/* nvif_vmm_vX ... */
+	__u8  version;
+	__u8  bigpage;
+};
+
+struct gm200_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gm200_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif

+ 21 - 0
drivers/gpu/drm/nouveau/include/nvif/ifc00d.h

@@ -0,0 +1,21 @@
+#ifndef __NVIF_IFC00D_H__
+#define __NVIF_IFC00D_H__
+#include "if000c.h"
+
+struct gp100_vmm_vn {
+	/* nvif_vmm_vX ... */
+};
+
+struct gp100_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gp100_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
+#endif

+ 7 - 3
drivers/gpu/drm/nouveau/include/nvif/ioctl.h

@@ -1,7 +1,7 @@
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
-#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+#define NVIF_VERSION_LATEST                               0x0000000000000100ULL
 
 struct nvif_ioctl_v0 {
 	__u8  version;
@@ -83,9 +83,13 @@ struct nvif_ioctl_wr_v0 {
 struct nvif_ioctl_map_v0 {
 	/* nvif_ioctl ... */
 	__u8  version;
-	__u8  pad01[3];
-	__u32 length;
+#define NVIF_IOCTL_MAP_V0_IO                                               0x00
+#define NVIF_IOCTL_MAP_V0_VA                                               0x01
+	__u8  type;
+	__u8  pad02[6];
 	__u64 handle;
+	__u64 length;
+	__u8  data[];
 };
 
 struct nvif_ioctl_unmap {

+ 18 - 0
drivers/gpu/drm/nouveau/include/nvif/mem.h

@@ -0,0 +1,18 @@
+#ifndef __NVIF_MEM_H__
+#define __NVIF_MEM_H__
+#include "mmu.h"
+
+struct nvif_mem {
+	struct nvif_object object;
+	u8  type;
+	u8  page;
+	u64 addr;
+	u64 size;
+};
+
+int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+		       u64 size, void *argv, u32 argc, struct nvif_mem *);
+int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+		  u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_fini(struct nvif_mem *);
+#endif
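
A plausible usage sketch for this new interface follows; it is not taken from the changeset. It assumes a struct nvif_mmu already set up with nvif_mmu_init(), picks a memory-type index with nvif_mmu_type() from nvif/mmu.h (the next diff), and uses NVIF_CLASS_MEM_GF100 from the nvif/class.h hunk above; the function name, 64KiB size, and page shift of 0 are placeholders.

	/* Hypothetical helper: allocate host-visible, mappable, coherent
	 * memory through the new nvif_mem interface. */
	static int
	example_mem_alloc(struct nvif_mmu *mmu, struct nvif_mem *mem)
	{
		int type = nvif_mmu_type(mmu, NVIF_MEM_HOST |
					      NVIF_MEM_MAPPABLE |
					      NVIF_MEM_COHERENT);
		if (type < 0)
			return type;
		/* Page shift 0 is a placeholder; real callers pass the
		 * page size they intend to map with. Freed later with
		 * nvif_mem_fini(). */
		return nvif_mem_init_type(mmu, NVIF_CLASS_MEM_GF100, type, 0,
					  0x10000, NULL, 0, mem);
	}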

+ 56 - 0
drivers/gpu/drm/nouveau/include/nvif/mmu.h

@@ -0,0 +1,56 @@
+#ifndef __NVIF_MMU_H__
+#define __NVIF_MMU_H__
+#include <nvif/object.h>
+
+struct nvif_mmu {
+	struct nvif_object object;
+	u8  dmabits;
+	u8  heap_nr;
+	u8  type_nr;
+	u16 kind_nr;
+
+	struct {
+		u64 size;
+	} *heap;
+
+	struct {
+#define NVIF_MEM_VRAM                                                      0x01
+#define NVIF_MEM_HOST                                                      0x02
+#define NVIF_MEM_COMP                                                      0x04
+#define NVIF_MEM_DISP                                                      0x08
+#define NVIF_MEM_KIND                                                      0x10
+#define NVIF_MEM_MAPPABLE                                                  0x20
+#define NVIF_MEM_COHERENT                                                  0x40
+#define NVIF_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} *type;
+
+	u8 *kind;
+};
+
+int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
+void nvif_mmu_fini(struct nvif_mmu *);
+
+static inline bool
+nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
+{
+	const u8 invalid = mmu->kind_nr - 1;
+	if (kind) {
+		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+			return false;
+	}
+	return true;
+}
+
+static inline int
+nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
+{
+	int i;
+	for (i = 0; i < mmu->type_nr; i++) {
+		if ((mmu->type[i].type & mask) == mask)
+			return i;
+	}
+	return -EINVAL;
+}
+#endif
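
The two inline helpers above are the intended client-side queries against the MMU's type and kind tables. A short hedged sketch of how a caller might use them together (the function name and argument are hypothetical):

	/* Hypothetical: validate a userspace-supplied kind, then find any
	 * compressible VRAM type index (or -EINVAL if none exists). */
	static int
	example_check_args(struct nvif_mmu *mmu, u8 kind)
	{
		if (!nvif_mmu_kind_valid(mmu, kind))
			return -EINVAL;
		return nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_COMP);
	}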

+ 5 - 2
drivers/gpu/drm/nouveau/include/nvif/object.h

@@ -16,7 +16,7 @@ struct nvif_object {
 	void *priv; /*XXX: hack */
 	struct {
 		void __iomem *ptr;
-		u32 size;
+		u64 size;
 	} map;
 };
 
@@ -29,7 +29,10 @@ void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
-int  nvif_object_map(struct nvif_object *);
+int  nvif_object_map_handle(struct nvif_object *, void *, u32,
+			    u64 *handle, u64 *length);
+void nvif_object_unmap_handle(struct nvif_object *);
+int  nvif_object_map(struct nvif_object *, void *, u32);
 void nvif_object_unmap(struct nvif_object *);
 
 #define nvif_handle(a) (unsigned long)(void *)(a)
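
The single old nvif_object_map() is now split into two paths; a hedged sketch of both follows (only the nvif_* calls are from the header, the surrounding names are hypothetical). The dispnv04 hunk earlier shows the first path with no method arguments.

	u64 handle, length;
	int ret;

	/* Kernel-managed mapping, as before, now taking optional
	 * class-specific map arguments (none here). */
	ret = nvif_object_map(object, NULL, 0);

	/* Or: fetch the raw handle/length (length is now 64-bit) and
	 * manage the mapping yourself, releasing with unmap_handle. */
	ret = nvif_object_map_handle(object, NULL, 0, &handle, &length);
	if (ret == 0) {
		/* ... map handle/length as appropriate ... */
		nvif_object_unmap_handle(object);
	}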

+ 0 - 14
drivers/gpu/drm/nouveau/include/nvif/os.h

@@ -33,18 +33,4 @@
 
 #include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
-
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native  ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native  ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
 #endif

+ 42 - 0
drivers/gpu/drm/nouveau/include/nvif/vmm.h

@@ -0,0 +1,42 @@
+#ifndef __NVIF_VMM_H__
+#define __NVIF_VMM_H__
+#include <nvif/object.h>
+struct nvif_mem;
+struct nvif_mmu;
+
+enum nvif_vmm_get {
+	ADDR,
+	PTES,
+	LAZY
+};
+
+struct nvif_vma {
+	u64 addr;
+	u64 size;
+};
+
+struct nvif_vmm {
+	struct nvif_object object;
+	u64 start;
+	u64 limit;
+
+	struct {
+		u8 shift;
+		bool sparse:1;
+		bool vram:1;
+		bool host:1;
+		bool comp:1;
+	} *page;
+	int page_nr;
+};
+
+int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
+		  void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
+		 u8 page, u8 align, u64 size, struct nvif_vma *);
+void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
+int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
+		 struct nvif_mem *, u64 offset);
+int nvif_vmm_unmap(struct nvif_vmm *, u64);
+#endif
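
Taken together with nvif/mem.h, the interface implies the following lifecycle. A hedged sketch (hypothetical helper; the addr/size of 0,0 passed to nvif_vmm_init are placeholders for whatever range policy the caller wants):

	static int
	example_vmm_bind(struct nvif_mmu *mmu, struct nvif_mem *mem)
	{
		struct nvif_vmm vmm;
		struct nvif_vma vma;
		int ret;

		ret = nvif_vmm_init(mmu, NVIF_CLASS_VMM_GF100, 0, 0,
				    NULL, 0, &vmm);
		if (ret)
			return ret;

		/* PTES: pre-allocate page tables for mem's page size. */
		ret = nvif_vmm_get(&vmm, PTES, false, mem->page, 0,
				   mem->size, &vma);
		if (ret == 0) {
			ret = nvif_vmm_map(&vmm, vma.addr, vma.size,
					   NULL, 0, mem, 0);
			if (ret == 0)
				nvif_vmm_unmap(&vmm, vma.addr);
			nvif_vmm_put(&vmm, &vma);
		}
		nvif_vmm_fini(&vmm);
		return ret;
	}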

+ 2 - 1
drivers/gpu/drm/nouveau/include/nvkm/core/client.h

@@ -16,7 +16,8 @@ struct nvkm_client {
 	void *data;
 	int (*ntfy)(const void *, u32, const void *, u32);
 
-	struct nvkm_vm *vm;
+	struct list_head umem;
+	spinlock_t lock;
 };
 
 int  nvkm_client_new(const char *name, u64 device, const char *cfg,

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvkm/core/device.h

@@ -1,7 +1,7 @@
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
+#include <core/oclass.h>
 #include <core/event.h>
-#include <core/object.h>
 
 enum nvkm_devidx {
 	NVKM_SUBDEV_PCI,

+ 1 - 0
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h

@@ -15,6 +15,7 @@ struct nvkm_engine {
 
 struct nvkm_engine_func {
 	void *(*dtor)(struct nvkm_engine *);
+	void (*preinit)(struct nvkm_engine *);
 	int (*oneinit)(struct nvkm_engine *);
 	int (*init)(struct nvkm_engine *);
 	int (*fini)(struct nvkm_engine *, bool suspend);

+ 6 - 8
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h

@@ -1,17 +1,16 @@
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
-#include <core/object.h>
 #include <core/memory.h>
 #include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
-	struct nvkm_object object;
-	const struct nvkm_gpuobj_func *func;
+	union {
+		const struct nvkm_gpuobj_func *func;
+		const struct nvkm_gpuobj_func *ptrs;
+	};
 	struct nvkm_gpuobj *parent;
 	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
@@ -28,15 +27,14 @@ struct nvkm_gpuobj_func {
 	void (*release)(struct nvkm_gpuobj *);
 	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
 	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+	int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
 };
 
 int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
 		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
 void nvkm_gpuobj_del(struct nvkm_gpuobj **);
 int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-		    struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
 void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
 			   u32 length);
 void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,

+ 56 - 8
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h

@@ -3,7 +3,12 @@
 #include <core/os.h>
 struct nvkm_device;
 struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
+
+struct nvkm_tags {
+	struct nvkm_mm_node *mn;
+	refcount_t refcount;
+};
 
 enum nvkm_memory_target {
 	NVKM_MEM_TARGET_INST, /* instance memory */
@@ -14,41 +19,84 @@ enum nvkm_memory_target {
 
 struct nvkm_memory {
 	const struct nvkm_memory_func *func;
+	const struct nvkm_memory_ptrs *ptrs;
+	struct kref kref;
+	struct nvkm_tags *tags;
 };
 
 struct nvkm_memory_func {
 	void *(*dtor)(struct nvkm_memory *);
 	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u8 (*page)(struct nvkm_memory *);
 	u64 (*addr)(struct nvkm_memory *);
 	u64 (*size)(struct nvkm_memory *);
-	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
+	int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
+};
+
+struct nvkm_memory_ptrs {
 	u32 (*rd32)(struct nvkm_memory *, u64 offset);
 	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
 int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
 		    u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
+int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
+			 void (*clear)(struct nvkm_device *, u32, u32),
+			 struct nvkm_tags **);
+void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
+			  struct nvkm_tags **);
+
 #define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_page(p) (p)->func->page(p)
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac)                                       \
+	(p)->func->map((p),(o),(vm),(va),(av),(ac))
 
 /* accessor macros - kmap()/done() must bracket use of the other accessor
  * macros to guarantee correct behaviour across all chipsets
  */
 #define nvkm_kmap(o)     (o)->func->acquire(o)
-#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_done(o)     (o)->func->release(o)
+
+#define nvkm_ro32(o,a)   (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
 #define nvkm_mo32(o,a,m,d) ({                                                  \
 	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
 	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
 	_data;                                                                 \
 })
-#define nvkm_done(o)     (o)->func->release(o)
+
+#define nvkm_wo64(o,a,d) do {                                                  \
+	u64 __a = (a), __d = (d);                                              \
+	nvkm_wo32((o), __a + 0, lower_32_bits(__d));                           \
+	nvkm_wo32((o), __a + 4, upper_32_bits(__d));                           \
+} while(0)
+
+#define nvkm_fill(t,s,o,a,d,c) do {                                            \
+	u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s;          \
+	u##t __iomem *_m = nvkm_kmap(o);                                       \
+	if (likely(_m)) {                                                      \
+		if (_d) {                                                      \
+			while (_c--)                                           \
+				iowrite##t##_native(_d, &_m[_o++]);            \
+		} else {                                                       \
+			memset_io(&_m[_o], _d, _s);                            \
+		}                                                              \
+	} else {                                                               \
+		for (; _c; _c--, _a += BIT(s))                                 \
+			nvkm_wo##t((o), _a, _d);                               \
+	}                                                                      \
+	nvkm_done(o);                                                          \
+} while(0)
+#define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c))
+#define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c))
 #endif
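
The accessor split matters in practice: rd32/wr32 move from _func to the new _ptrs table, and nvkm_done() still pairs with nvkm_kmap() around any use of them. A hedged sketch, with "inst" standing in for any struct nvkm_memory:

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x00, 0x00000001);
	nvkm_wo64(inst, 0x08, 0x00001000deadbeefULL); /* two 32-bit writes */
	nvkm_done(inst);

	/* The fill helpers bracket kmap/done themselves; the count is in
	 * 32-bit (fo32) or 64-bit (fo64) units, so this zeroes 0x1000
	 * bytes starting at offset 0x1000. */
	nvkm_fo32(inst, 0x1000, 0x00000000, 0x1000 >> 2);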

+ 31 - 1
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h

@@ -30,7 +30,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
 	return mm->heap_nodes;
 }
 
-int  nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int  nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
 int  nvkm_mm_fini(struct nvkm_mm *);
 int  nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		  u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -39,9 +39,39 @@ int  nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
 
+static inline u32
+nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
+{
+	struct nvkm_mm_node *node;
+	u32 size = 0;
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		if (node->heap == heap)
+			size += node->length;
+	}
+	return size;
+}
+
 static inline bool
 nvkm_mm_contiguous(struct nvkm_mm_node *node)
 {
 	return !node->next;
 }
+
+static inline u32
+nvkm_mm_addr(struct nvkm_mm_node *node)
+{
+	if (WARN_ON(!nvkm_mm_contiguous(node)))
+		return 0;
+	return node->offset;
+}
+
+static inline u32
+nvkm_mm_size(struct nvkm_mm_node *node)
+{
+	u32 size = 0;
+	do {
+		size += node->length;
+	} while ((node = node->next));
+	return size;
+}
 #endif
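
A note on the new helpers: an allocation handed back by nvkm_mm_head()/nvkm_mm_tail() may be a chain of nodes, so nvkm_mm_size() walks the ->next list, while nvkm_mm_addr() WARNs and returns 0 unless the allocation is contiguous. A hedged fragment ("node" and "base" are hypothetical):

	u32 blocks = nvkm_mm_size(node);	/* total, in mm block units */
	if (nvkm_mm_contiguous(node))
		base = nvkm_mm_addr(node);	/* valid only when contiguous */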

+ 12 - 29
drivers/gpu/drm/nouveau/include/nvkm/core/object.h

@@ -1,10 +1,8 @@
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
-#include <core/os.h>
-#include <core/debug.h>
+#include <core/oclass.h>
 struct nvkm_event;
 struct nvkm_gpuobj;
-struct nvkm_oclass;
 
 struct nvkm_object {
 	const struct nvkm_object_func *func;
@@ -21,13 +19,20 @@ struct nvkm_object {
 	struct rb_node node;
 };
 
+enum nvkm_object_map {
+	NVKM_OBJECT_MAP_IO,
+	NVKM_OBJECT_MAP_VA
+};
+
 struct nvkm_object_func {
 	void *(*dtor)(struct nvkm_object *);
 	int (*init)(struct nvkm_object *);
 	int (*fini)(struct nvkm_object *, bool suspend);
 	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
 	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*map)(struct nvkm_object *, void *argv, u32 argc,
+		   enum nvkm_object_map *, u64 *addr, u64 *size);
+	int (*unmap)(struct nvkm_object *);
 	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
 	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
 	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
@@ -52,7 +57,9 @@ int nvkm_object_init(struct nvkm_object *);
 int nvkm_object_fini(struct nvkm_object *, bool suspend);
 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+		    enum nvkm_object_map *, u64 *addr, u64 *size);
+int nvkm_object_unmap(struct nvkm_object *);
 int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8  *data);
 int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
 int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
@@ -66,28 +73,4 @@ bool nvkm_object_insert(struct nvkm_object *);
 void nvkm_object_remove(struct nvkm_object *);
 struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
 				       const struct nvkm_object_func *);
-
-struct nvkm_sclass {
-	int minver;
-	int maxver;
-	s32 oclass;
-	const struct nvkm_object_func *func;
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-};
-
-struct nvkm_oclass {
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-	struct nvkm_sclass base;
-	const void *priv;
-	const void *engn;
-	u32 handle;
-	u8  route;
-	u64 token;
-	u64 object;
-	struct nvkm_client *client;
-	struct nvkm_object *parent;
-	struct nvkm_engine *engine;
-};
 #endif

+ 31 - 0
drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h

@@ -0,0 +1,31 @@
+#ifndef __NVKM_OCLASS_H__
+#define __NVKM_OCLASS_H__
+#include <core/os.h>
+#include <core/debug.h>
+struct nvkm_oclass;
+struct nvkm_object;
+
+struct nvkm_sclass {
+	int minver;
+	int maxver;
+	s32 oclass;
+	const struct nvkm_object_func *func;
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const void *priv;
+	const void *engn;
+	u32 handle;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct nvkm_client *client;
+	struct nvkm_object *parent;
+	struct nvkm_engine *engine;
+};
+#endif

+ 19 - 0
drivers/gpu/drm/nouveau/include/nvkm/core/os.h

@@ -1,4 +1,23 @@
 #ifndef __NVKM_OS_H__
 #define __NVKM_OS_H__
 #include <nvif/os.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native  ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native  ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do {                                             \
+	u32 __iomem *_p = (u32 __iomem *)(p);				       \
+	u64 _v = (v);							       \
+	iowrite32_native(lower_32_bits(_v), &_p[0]);			       \
+	iowrite32_native(upper_32_bits(_v), &_p[1]);			       \
+} while(0)
 #endif

+ 1 - 0
drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h

@@ -1,6 +1,7 @@
 #ifndef __NVKM_RAMHT_H__
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
+struct nvkm_object;
 
 struct nvkm_ramht_data {
 	struct nvkm_gpuobj *inst;

+ 1 - 1
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h

@@ -33,7 +33,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
 /* subdev logging */
 #define nvkm_printk_(s,l,p,f,a...) do {                                        \
 	const struct nvkm_subdev *_subdev = (s);                               \
-	if (_subdev->debug >= (l)) {                                           \
+	if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) {            \
 		dev_##p(_subdev->device->dev, "%s: "f,                         \
 			nvkm_subdev_name[_subdev->index], ##a);                \
 	}                                                                      \

+ 1 - 0
drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h

@@ -1,6 +1,7 @@
 #ifndef __NVKM_DMA_H__
 #define __NVKM_DMA_H__
 #include <core/engine.h>
+#include <core/object.h>
 struct nvkm_client;
 
 struct nvkm_dmaobj {

+ 3 - 2
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h

@@ -3,6 +3,7 @@
 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
 struct nvkm_fifo_chan;
+struct nvkm_gpuobj;
 
 enum nvkm_falcon_dmaidx {
 	FALCON_DMAIDX_UCODE		= 0,
@@ -77,7 +78,7 @@ struct nvkm_falcon_func {
 	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
 	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
 	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
-	void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+	void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
 	int (*wait_for_halt)(struct nvkm_falcon *, u32);
 	int (*clear_interrupt)(struct nvkm_falcon *, u32);
 	void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -112,7 +113,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
 			   bool);
 void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
 void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
 void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
 void nvkm_falcon_start(struct nvkm_falcon *);
 int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);

+ 2 - 1
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h

@@ -1,6 +1,7 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
 #include <core/engine.h>
+#include <core/object.h>
 #include <core/event.h>
 
 #define NVKM_FIFO_CHID_NR 4096
@@ -21,7 +22,7 @@ struct nvkm_fifo_chan {
 	u16 chid;
 	struct nvkm_gpuobj *inst;
 	struct nvkm_gpuobj *push;
-	struct nvkm_vm *vm;
+	struct nvkm_vmm *vmm;
 	void __iomem *user;
 	u64 addr;
 	u32 size;

+ 7 - 2
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h

@@ -8,17 +8,22 @@ struct nvkm_bar {
 	struct nvkm_subdev subdev;
 
 	spinlock_t lock;
+	bool bar2;
 
 	/* whether the BAR supports to be ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar2_init(struct nvkm_device *);
+void nvkm_bar_bar2_fini(struct nvkm_device *);
+struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
-struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
 int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif

+ 10 - 26
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h

@@ -1,8 +1,7 @@
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
-
-#include <subdev/mmu.h>
+#include <core/mm.h>
 
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
@@ -21,22 +20,6 @@
 #define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
-struct nvkm_mem {
-	struct drm_device *dev;
-
-	struct nvkm_vma bar_vma;
-	struct nvkm_vma vma[2];
-	u8  page_shift;
-
-	struct nvkm_mm_node *tag;
-	struct nvkm_mm_node *mem;
-	dma_addr_t *pages;
-	u32 memtype;
-	u64 offset;
-	u64 size;
-	struct sg_table *sg;
-};
-
 struct nvkm_fb_tile {
 	struct nvkm_mm_node *tag;
 	u32 addr;
@@ -50,6 +33,7 @@ struct nvkm_fb {
 	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
+	struct nvkm_mm tags;
 
 	struct {
 		struct nvkm_fb_tile region[16];
@@ -62,7 +46,6 @@ struct nvkm_fb {
 	struct nvkm_memory *mmu_wr;
 };
 
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
 void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -129,8 +112,11 @@ struct nvkm_ram {
 	u64 size;
 
 #define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY    (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP  (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
-	struct nvkm_mm tags;
 	u64 stolen;
 
 	int ranks;
@@ -147,6 +133,10 @@ struct nvkm_ram {
 	struct nvkm_ram_data target;
 };
 
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+	     bool contig, bool back, struct nvkm_memory **);
+
 struct nvkm_ram_func {
 	u64 upper;
 	u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
@@ -157,14 +147,8 @@ struct nvkm_ram_func {
 	void *(*dtor)(struct nvkm_ram *);
 	int (*init)(struct nvkm_ram *);
 
-	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
-		   u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
-
 	int (*calc)(struct nvkm_ram *, u32 freq);
 	int (*prog)(struct nvkm_ram *);
 	void (*tidy)(struct nvkm_ram *);
 };
-
-extern const u8 gf100_pte_storage_type_map[256];
 #endif
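
nvkm_ram_get() replaces the per-backend get()/put() hooks with a single VRAM allocator that returns a refcounted nvkm_memory. A heavily hedged sketch: the heap constant is from the list above, but the type value and page shift below are placeholders (their encodings are device-specific), and "back" is assumed here to select tail allocation:

	struct nvkm_memory *memory;
	int ret;

	ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL,
			   0x01 /* placeholder type */, 12 /* 4KiB shift */,
			   size, true /* contig */, true /* back */, &memory);
	if (ret)
		return ret;
	/* ... freed by dropping the reference ... */
	nvkm_memory_unref(&memory);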

+ 1 - 0
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h

@@ -9,6 +9,7 @@ struct nvkm_instmem {
 
 	spinlock_t lock;
 	struct list_head list;
+	struct list_head boot;
 	u32 reserved;
 
 	struct nvkm_memory *vbios;

+ 2 - 5
drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h

@@ -14,8 +14,7 @@ struct nvkm_ltc {
 
 	u32 num_tags;
 	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
+	struct nvkm_memory *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
@@ -23,9 +22,7 @@ struct nvkm_ltc {
 	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
-void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
 
 int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
 int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);

+ 101 - 39
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h

@@ -1,68 +1,130 @@
 #ifndef __NVKM_MMU_H__
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
 #include <core/subdev.h>
-#include <core/mm.h>
-struct nvkm_device;
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
-	struct nvkm_memory *mem[2];
-	u32 refcount[2];
-};
-
-struct nvkm_vm_pgd {
-	struct list_head head;
-	struct nvkm_gpuobj *obj;
-};
 
 
 struct nvkm_vma {
 struct nvkm_vma {
 	struct list_head head;
 	struct list_head head;
-	int refcount;
-	struct nvkm_vm *vm;
-	struct nvkm_mm_node *node;
-	u64 offset;
-	u32 access;
+	struct rb_node tree;
+	u64 addr;
+	u64 size:50;
+	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+	u8   page:3; /* Requested page type (index, or NONE for automatic). */
+	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
+	bool used:1; /* Region allocated. */
+	bool part:1; /* Region was split from an allocated region by map(). */
+	bool user:1; /* Region user-allocated. */
+	bool busy:1; /* Region busy (for temporarily preventing user access). */
+	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+	struct nvkm_tags *tags; /* Compression tag reference. */
 };
 };
 
 
-struct nvkm_vm {
+struct nvkm_vmm {
+	const struct nvkm_vmm_func *func;
 	struct nvkm_mmu *mmu;
 	struct nvkm_mmu *mmu;
-
+	const char *name;
+	u32 debug;
+	struct kref kref;
 	struct mutex mutex;
 	struct mutex mutex;
-	struct nvkm_mm mm;
-	struct kref refcount;
 
 
-	struct list_head pgd_list;
+	u64 start;
+	u64 limit;
+
+	struct nvkm_vmm_pt *pd;
+	struct list_head join;
+
+	struct list_head list;
+	struct rb_root free;
+	struct rb_root root;
+
+	bool bootstrapped;
 	atomic_t engref[NVKM_SUBDEV_NR];
 	atomic_t engref[NVKM_SUBDEV_NR];
 
 
-	struct nvkm_vm_pgt *pgt;
-	u32 fpde;
-	u32 lpde;
+	dma_addr_t null;
+	void *nullp;
 };
 };
 
 
-int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct lock_class_key *, struct nvkm_vm **);
-int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
-int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
-		 struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
+int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
+
+struct nvkm_vmm_map {
+	struct nvkm_memory *memory;
+	u64 offset;
+
+	struct nvkm_mm_node *mem;
+	struct scatterlist *sgl;
+	dma_addr_t *dma;
+	u64 off;
+
+	const struct nvkm_vmm_page *page;
+
+	struct nvkm_tags *tags;
+	u64 next;
+	u64 type;
+	u64 ctag;
+};
+
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+		 struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+
+struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
 
 struct nvkm_mmu {
 	const struct nvkm_mmu_func *func;
 	struct nvkm_subdev subdev;
 
-	u64 limit;
 	u8  dma_bits;
-	u8  lpg_shift;
+
+	int heap_nr;
+	struct {
+#define NVKM_MEM_VRAM                                                      0x01
+#define NVKM_MEM_HOST                                                      0x02
+#define NVKM_MEM_COMP                                                      0x04
+#define NVKM_MEM_DISP                                                      0x08
+		u8  type;
+		u64 size;
+	} heap[4];
+
+	int type_nr;
+	struct {
+#define NVKM_MEM_KIND                                                      0x10
+#define NVKM_MEM_MAPPABLE                                                  0x20
+#define NVKM_MEM_COHERENT                                                  0x40
+#define NVKM_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} type[16];
+
+	struct nvkm_vmm *vmm;
+
+	struct {
+		struct mutex mutex;
+		struct list_head list;
+	} ptc, ptp;
+
+	struct nvkm_device_oclass user;
 };
 
 int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
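
For orientation, a minimal lifecycle sketch of the nvkm_vmm interface declared above. This is not code from the series itself; it assumes the "page" argument to nvkm_vmm_get() is a page shift (12 for 4KiB) and passes no backend-specific map arguments.

/* Hedged usage sketch for the new nvkm_vmm interface declared above;
 * "device" and "memory" are assumed to already exist.
 */
static int
example_vmm(struct nvkm_device *device, struct nvkm_memory *memory, u64 size)
{
	static struct lock_class_key key;
	struct nvkm_vmm_map map = { .memory = memory };
	struct nvkm_vmm *vmm;
	struct nvkm_vma *vma;
	int ret;

	/* Create an address space covering [0, size). */
	ret = nvkm_vmm_new(device, 0, size, NULL, 0, &key, "example", &vmm);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(vmm, 12, size, &vma);
	if (ret == 0) {
		/* argv/argc would carry backend-specific map arguments. */
		ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
		if (ret == 0)
			nvkm_vmm_unmap(vmm, vma);
		nvkm_vmm_put(vmm, &vma);
	}

	nvkm_vmm_unref(&vmm);
	return ret;
}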

+ 1 - 0
drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h

@@ -97,4 +97,5 @@ int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif

+ 23 - 19
drivers/gpu/drm/nouveau/nouveau_abi16.c

@@ -34,6 +34,7 @@
 #include "nouveau_gem.h"
 #include "nouveau_chan.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 static struct nouveau_abi16 *
 nouveau_abi16(struct drm_file *file_priv)
@@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 	}
 
 	if (chan->ntfy) {
-		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		nouveau_vma_del(&chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
 		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
@@ -184,29 +185,33 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nvxx_device(device)->func->pci)
-			getparam->value = 3;
-		else
-		if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
-			getparam->value = 0;
-		else
-		if (!pci_is_pcie(dev->pdev))
-			getparam->value = 1;
-		else
-			getparam->value = 2;
-		break;
+		switch (device->info.platform) {
+		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
+		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
+		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
+		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
+		case NV_DEVICE_INFO_V0_IGP :
+			if (!pci_is_pcie(dev->pdev))
+				getparam->value = 1;
+			else
+				getparam->value = 2;
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = drm->gem.vram_available;
 		break;
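
The reworked case keeps the ABI values userspace already expects (0 = AGP, 1 = PCI, 2 = PCIe, 3 = SoC). A hypothetical libdrm-based query, for illustration only and not part of this patch:

/* Hypothetical userspace probe of the bus type. */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

static int example_bus_type(int fd, uint64_t *bus)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_BUS_TYPE,
	};
	int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
				      &gp, sizeof(gp));
	if (ret == 0)
		*bus = gp.value;	/* 0 AGP, 1 PCI, 2 PCIe, 3 SoC */
	return ret;
}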
@@ -329,8 +334,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		goto done;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
-					&chan->ntfy_vma);
+		ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
 		if (ret)
 			goto done;
 	}
@@ -340,7 +344,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
 done:
 	if (ret)
 		nouveau_abi16_chan_fini(abi16, chan);
@@ -548,8 +552,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
-		args.start += chan->ntfy_vma.offset;
-		args.limit += chan->ntfy_vma.offset;
+		args.start += chan->ntfy_vma->addr;
+		args.limit += chan->ntfy_vma->addr;
 	} else
 	if (drm->agp.bridge) {
 		args.target = NV_DMA_V0_TARGET_AGP;

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_abi16.h

@@ -23,7 +23,7 @@ struct nouveau_abi16_chan {
 	struct nouveau_channel *chan;
 	struct list_head notifiers;
 	struct nouveau_bo *ntfy;
-	struct nvkm_vma ntfy_vma;
+	struct nouveau_vma *ntfy_vma;
 	struct nvkm_mm  heap;
 };
 

+ 5 - 1
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -1478,9 +1478,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 		case 1:
 			entry->dpconf.link_bw = 270000;
 			break;
-		default:
+		case 2:
 			entry->dpconf.link_bw = 540000;
 			break;
+		case 3:
+		default:
+			entry->dpconf.link_bw = 810000;
+			break;
 		}
 		switch ((conf & 0x0f000000) >> 24) {
 		case 0xf:
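
The new arms extend DCB 2.0 DisplayPort parsing to HBR3; link_bw holds the link symbol clock in kHz (per-lane data rate is ten times that with 8b/10b coding). A standalone restatement, where the field extraction and the case-0 value are assumed from the surrounding parser rather than shown in this hunk:

/* Sketch only; ((conf & 0x00e00000) >> 21) and case 0 are assumptions. */
static u32 example_dp_link_bw(u32 conf)
{
	switch ((conf & 0x00e00000) >> 21) {
	case 0:  return 162000;	/* RBR   1.62 Gbit/s per lane */
	case 1:  return 270000;	/* HBR   2.70 Gbit/s per lane */
	case 2:  return 540000;	/* HBR2  5.40 Gbit/s per lane */
	case 3:
	default: return 810000;	/* HBR3  8.10 Gbit/s per lane */
	}
}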

+ 219 - 172
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -37,6 +37,12 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
 
 /*
  * NV10-NV40 tiling helpers
@@ -48,8 +54,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
@@ -97,7 +102,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +125,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					    pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -155,27 +159,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 	struct nvif_device *device = &drm->client.device;
 
 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-		if (nvbo->tile_mode) {
+		if (nvbo->mode) {
 			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup_64(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup_64(*size, (1 << nvbo->page_shift));
-		*align = max((1 <<  nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 <<  nvbo->page), *align);
 	}
 
 	*size = roundup_64(*size, PAGE_SIZE);
@@ -187,11 +191,13 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	       struct sg_table *sg, struct reservation_object *robj,
 	       struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(cli->dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_vmm *vmm = &cli->vmm.vmm;
 	size_t acc_size;
-	int ret;
 	int type = ttm_bo_type_device;
+	int ret, i, pi = -1;
 
 	if (!size) {
 		NV_WARN(drm, "skipped size %016llx\n", size);
@@ -207,19 +213,80 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 	nvbo->cli = cli;
 
-	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
-		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+	/* This is confusing, and doesn't actually mean we want an uncached
+	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+	 * into in nouveau_gem_new().
+	 */
+	if (flags & TTM_PL_FLAG_UNCACHED) {
+		/* Determine if we can get a cache-coherent map, forcing
+		 * uncached mapping if we can't.
+		 */
+		if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+			nvbo->force_coherent = true;
+	}
+
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return -EINVAL;
+		}
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+	/* Determine the desirable target GPU page size for the buffer. */
+	for (i = 0; i < vmm->page_nr; i++) {
+		/* Because we cannot currently allow VMM maps to fail
+		 * during buffer migration, we need to determine page
+		 * size for the buffer up-front, and pre-allocate its
+		 * page tables.
+		 *
+		 * Skip page sizes that can't support needed domains.
+		 */
+		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+			continue;
+		if ((flags & TTM_PL_FLAG_TT  ) && !vmm->page[i].host)
+			continue;
+
+		/* Select this page size if it's the first that supports
+		 * the potential memory domains, or when it's compatible
+		 * with the requested compression settings.
+		 */
+		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+			pi = i;
+
+		/* Stop once the buffer is larger than the current page size. */
+		if (size >= 1ULL << vmm->page[i].shift)
+			break;
+	}
+
+	if (WARN_ON(pi < 0))
+		return -EINVAL;
 
-	nvbo->page_shift = 12;
-	if (drm->client.vm) {
-		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+	/* Disable compression if suitable settings couldn't be found. */
+	if (nvbo->comp && !vmm->page[pi].comp) {
+		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+			nvbo->kind = mmu->kind[nvbo->kind];
+		nvbo->comp = 0;
 	}
+	nvbo->page = vmm->page[pi].shift;
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
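
The selection loop above reads as a standalone heuristic; a condensed sketch follows, assuming vmm->page[] is ordered from largest to smallest page size. For example, with {2M, 64K, 4K} pages, a 1MiB VRAM buffer passes over 2M (the buffer is smaller than one such page, so iteration continues) and settles on 64K.

/* Condensed sketch of the page-size heuristic above; not the patch
 * itself, and pages[] ordering is an assumption.
 */
static int example_pick_page(const struct nvif_vmm *vmm, u64 size,
			     bool vram, bool host, bool comp)
{
	int i, pi = -1;

	for (i = 0; i < vmm->page_nr; i++) {
		if (vram && !vmm->page[i].vram)
			continue;	/* page size can't back VRAM */
		if (host && !vmm->page[i].host)
			continue;	/* page size can't back host memory */
		/* First usable size wins, unless a later one can also
		 * carry compression when that was requested.
		 */
		if (pi < 0 || !comp || vmm->page[i].comp)
			pi = i;
		/* Stop once the buffer spans at least one page. */
		if (size >= 1ULL << vmm->page[i].shift)
			break;
	}
	return pi;	/* index into vmm->page[], or -1 if none fit */
}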
@@ -262,7 +329,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -270,7 +337,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+		if (nvbo->zeta) {
 			fpfn = vram_pages / 2;
 			lpfn = ~0;
 		} else {
@@ -321,14 +388,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nvkm_mem *mem = bo->mem.mm_node;
-				if (!nvkm_mm_contiguous(mem->mem))
-					evict = true;
-			}
-			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+		if (!nvbo->contig) {
+			nvbo->contig = true;
 			force = true;
 			force = true;
+			evict = true;
 		}
 		}
 	}
 	}
 
 
@@ -376,7 +439,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 
 
 out:
 out:
 	if (force && ret)
 	if (force && ret)
-		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+		nvbo->contig = false;
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -446,7 +509,6 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -458,7 +520,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_device(drm->dev->dev,
+					   ttm_dma->dma_address[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
@@ -466,7 +529,6 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;
 
@@ -478,7 +540,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
@@ -568,6 +630,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvif_mmu *mmu = &drm->client.mmu;
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -584,7 +647,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			/* Some BARs do not support being ioremapped WC */
-			if (nvxx_bar(&drm->client.device)->iomap_uncached) {
+			const u8 type = mmu->type[drm->ttm.type_vram].type;
+			if (type & NVIF_MEM_UNCACHED) {
 				man->available_caching = TTM_PL_FLAG_UNCACHED;
 				man->default_caching = TTM_PL_FLAG_UNCACHED;
 			}
@@ -659,14 +723,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 10);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
 		OUT_RING  (chan, PAGE_SIZE);
@@ -691,9 +755,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -729,9 +793,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -768,9 +832,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
 	u32 page_count = new_reg->num_pages;
 	int ret;
 
@@ -806,14 +870,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* COPY */);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 	}
@@ -824,15 +888,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
+		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
+		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
+		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 	}
 	return ret;
@@ -858,12 +922,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-	struct nvkm_mem *mem = old_reg->mm_node;
+	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].offset;
-	u64 dst_offset = mem->vma[1].offset;
-	int src_tiled = !!mem->memtype;
-	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+	u64 src_offset = mem->vma[0].addr;
+	u64 dst_offset = mem->vma[1].addr;
+	int src_tiled = !!mem->kind;
+	int dst_tiled = !!nouveau_mem(new_reg)->kind;
 	int ret;
 
 	while (length) {
@@ -1000,25 +1064,31 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *old_mem = bo->mem.mm_node;
-	struct nvkm_mem *new_mem = reg->mm_node;
-	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
+	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *new_mem = nouveau_mem(reg);
+	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+			   old_mem->mem.size, &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+			   new_mem->mem.size, &old_mem->vma[1]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
 	if (ret) {
-		nvkm_vm_put(&old_mem->vma[0]);
-		return ret;
+		nvif_vmm_put(vmm, &old_mem->vma[1]);
+		nvif_vmm_put(vmm, &old_mem->vma[0]);
 	}
-
-	nvkm_vm_map(&old_mem->vma[0], old_mem);
-	nvkm_vm_map(&old_mem->vma[1], new_mem);
 	return 0;
 }
 
@@ -1200,21 +1270,23 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 		     struct ttm_mem_reg *new_reg)
 {
+	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
-			      (new_reg->mem_type == TTM_PL_VRAM ||
-			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
-			nvkm_vm_map(vma, new_reg->mm_node);
-		} else {
+	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
+			nouveau_vma_map(vma, mem);
+		}
+	} else {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			WARN_ON(ttm_bo_wait(bo, false, false));
-			nvkm_vm_unmap(vma);
+			nouveau_vma_unmap(vma);
 		}
 	}
 }
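
The migration hunks above share a get-then-map pattern against the client's VMM; factored out as a sketch, with the assumed semantics that LAZY defers page-table references to map time, matching the temporary windows set up by nouveau_bo_move_prep():

/* Sketch of one migration-window setup; not a verbatim extract. */
static int
example_migrate_window(struct nvif_vmm *vmm, struct nouveau_mem *mem,
		       struct nvif_vma *vma)
{
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, mem->mem.page, 0,
			   mem->mem.size, vma);
	if (ret)
		return ret;

	ret = nouveau_mem_map(mem, vmm, vma);
	if (ret)
		nvif_vmm_put(vmm, vma);	/* unwind on failure */
	return ret;
}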
@@ -1234,8 +1306,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
-						nvbo->tile_mode,
-						nvbo->tile_flags);
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1331,8 +1402,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_mem *mem = reg->mm_node;
-	int ret;
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
 	reg->bus.addr = NULL;
 	reg->bus.offset = 0;
@@ -1353,7 +1423,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
 			/* untiled */
 			break;
 		/* fallthrough, tiled memory */
@@ -1361,19 +1431,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 		reg->bus.offset = reg->start << PAGE_SHIFT;
 		reg->bus.base = device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
-			int page_shift = 12;
-			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-				page_shift = mem->page_shift;
+		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+			union {
+				struct nv50_mem_map_v0 nv50;
+				struct gf100_mem_map_v0 gf100;
+			} args;
+			u64 handle, length;
+			u32 argc = 0;
+			int ret;
+
+			switch (mem->mem.object.oclass) {
+			case NVIF_CLASS_MEM_NV50:
+				args.nv50.version = 0;
+				args.nv50.ro = 0;
+				args.nv50.kind = mem->kind;
+				args.nv50.comp = mem->comp;
+				argc = sizeof(args.nv50);
+				break;
+			case NVIF_CLASS_MEM_GF100:
+				args.gf100.version = 0;
+				args.gf100.ro = 0;
+				args.gf100.kind = mem->kind;
+				argc = sizeof(args.gf100);
+				break;
+			default:
+				WARN_ON(1);
+				break;
+			}
 
-			ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
-					    &mem->bar_vma);
-			if (ret)
-				return ret;
+			ret = nvif_object_map_handle(&mem->mem.object,
+						     &args, argc,
+						     &handle, &length);
+			if (ret != 1)
+				return ret ? ret : -EINVAL;
 
-			nvkm_vm_map(&mem->bar_vma, mem);
-			reg->bus.offset = mem->bar_vma.offset;
+			reg->bus.base = 0;
+			reg->bus.offset = handle;
 		}
 		break;
 	default:
@@ -1385,13 +1476,22 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *mem = reg->mm_node;
-
-	if (!mem->bar_vma.node)
-		return;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	nvkm_vm_unmap(&mem->bar_vma);
-	nvkm_vm_put(&mem->bar_vma);
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
 static int
@@ -1408,7 +1508,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
-		    !nouveau_bo_tile_layout(nvbo))
+		    !nvbo->kind)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1445,9 +1545,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1464,9 +1562,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1476,7 +1572,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev->dev);
+		return ttm_dma_populate((void *)ttm, dev);
 	}
 #endif
 
@@ -1488,12 +1584,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; i++) {
 		dma_addr_t addr;
 
-		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
 				    DMA_BIDIRECTIONAL);
 
-		if (dma_mapping_error(pdev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			while (i--) {
-				dma_unmap_page(pdev, ttm_dma->dma_address[i],
+				dma_unmap_page(dev, ttm_dma->dma_address[i],
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				ttm_dma->dma_address[i] = 0;
 			}
@@ -1511,9 +1607,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct nvkm_device *device;
-	struct drm_device *dev;
-	struct device *pdev;
+	struct device *dev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
@@ -1521,9 +1615,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 		return;
 
 	drm = nouveau_bdev(ttm->bdev);
-	device = nvxx_device(&drm->client.device);
-	dev = drm->dev;
-	pdev = device->dev;
+	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
@@ -1534,14 +1626,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate((void *)ttm, dev->dev);
+		ttm_dma_unpopulate((void *)ttm, dev);
 		return;
 	}
 #endif
 
 	for (i = 0; i < ttm->num_pages; i++) {
 		if (ttm_dma->dma_address[i]) {
-			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
 				       DMA_BIDIRECTIONAL);
 		}
 	}
@@ -1576,48 +1668,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 };
-
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
-{
-	struct nvkm_vma *vma;
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (vma->vm == vm)
-			return vma;
-	}
-
-	return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
-		   struct nvkm_vma *vma)
-{
-	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	int ret;
-
-	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
-			     NV_MEM_ACCESS_RW, vma);
-	if (ret)
-		return ret;
-
-	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
-	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
-		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
-
-	list_add_tail(&vma->head, &nvbo->vma_list);
-	vma->refcount = 1;
-	return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
-			nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-		list_del(&vma->head);
-	}
-}

+ 7 - 10
drivers/gpu/drm/nouveau/nouveau_bo.h

@@ -24,12 +24,16 @@ struct nouveau_bo {
 	bool validate_mapped;
 
 	struct list_head vma_list;
-	unsigned page_shift;
 
 	struct nouveau_cli *cli;
 
-	u32 tile_mode;
-	u32 tile_flags;
+	unsigned contig:1;
+	unsigned page:5;
+	unsigned kind:8;
+	unsigned comp:3;
+	unsigned zeta:3;
+	unsigned mode;
+
 	struct nouveau_drm_tile *tile;
 
 	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
@@ -89,13 +93,6 @@ int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
 void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
 
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-
-int  nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
-			struct nvkm_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
-
 /* TODO: submit equivalent to TTM generic API upstream? */
 static inline void __iomem *
 nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
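
The helpers removed above are superseded by the per-client nouveau_vma API from nouveau_vmm.h, as used in nouveau_abi16.c and nouveau_chan.c earlier in this diff. The replacement pattern, sketched:

/* Sketch of the replacement API (nouveau_vma_new/del); not a verbatim
 * extract from the series.
 */
static int example_bind(struct nouveau_bo *nvbo, struct nouveau_cli *cli)
{
	struct nouveau_vma *vma;
	int ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
	if (ret)
		return ret;
	/* vma->addr is the buffer's GPU virtual address. */
	nouveau_vma_del(&vma);
	return 0;
}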

+ 32 - 19
drivers/gpu/drm/nouveau/nouveau_chan.c

@@ -40,6 +40,7 @@
 #include "nouveau_chan.h"
 #include "nouveau_fence.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
 int nouveau_vram_pushbuf;
@@ -83,6 +84,14 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		bool super;
+
+		if (cli) {
+			super = cli->base.super;
+			cli->base.super = true;
+		}
+
 		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
 		nvif_object_fini(&chan->nvsw);
@@ -91,12 +100,15 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 		nvif_notify_fini(&chan->kill);
 		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
-		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+		nouveau_vma_del(&chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
 		kfree(chan);
+
+		if (cli)
+			cli->base.super = super;
 	}
 	*pchan = NULL;
 }
@@ -106,7 +118,6 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		     u32 size, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
-	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
 	u32 target;
@@ -142,11 +153,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	 * pushbuf lives in, this is because the GEM code requires that
 	 * we be able to call out to other (indirect) push buffers
 	 */
-	chan->push.vma.offset = chan->push.buffer->bo.offset;
+	chan->push.addr = chan->push.buffer->bo.offset;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
-					&chan->push.vma);
+		ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+				      &chan->push.vma);
 		if (ret) {
 			nouveau_channel_del(pchan);
 			return ret;
@@ -155,7 +166,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
 		args.start = 0;
-		args.limit = cli->vm->mmu->limit - 1;
+		args.limit = cli->vmm.vmm.limit - 1;
+
+		chan->push.addr = chan->push.vma->addr;
 	} else
 	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -185,7 +198,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
-			args.limit = mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		}
 	}
 
@@ -203,6 +216,7 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		    u32 engine, struct nouveau_channel **pchan)
 {
+	struct nouveau_cli *cli = (void *)device->object.client;
 	static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
 					MAXWELL_CHANNEL_GPFIFO_A,
 					KEPLER_CHANNEL_GPFIFO_B,
@@ -233,22 +247,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 			args.kepler.version = 0;
 			args.kepler.engines = engine;
 			args.kepler.ilength = 0x02000;
-			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
-			args.kepler.vm = 0;
+			args.kepler.ioffset = 0x10000 + chan->push.addr;
+			args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.kepler);
 		} else
 		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
 			args.fermi.version = 0;
 			args.fermi.ilength = 0x02000;
-			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
-			args.fermi.vm = 0;
+			args.fermi.ioffset = 0x10000 + chan->push.addr;
+			args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
 			args.nv50.ilength = 0x02000;
-			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.ioffset = 0x10000 + chan->push.addr;
 			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
-			args.nv50.vm = 0;
+			args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.nv50);
 		}
 
@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
 	/* create channel object */
 	args.version = 0;
 	args.pushbuf = nvif_handle(&chan->push.ctxdma);
-	args.offset = chan->push.vma.offset;
+	args.offset = chan->push.addr;
 
 	do {
 		ret = nvif_object_init(&device->object, 0, *oclass++,
@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nvif_device *device = chan->device;
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_drm *drm = chan->drm;
-	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(&chan->user);
+	nvif_object_map(&chan->user, NULL, 0);
 
 	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
 		ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
-			args.limit = cli->vm->mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_VM;
 			args.start = 0;
-			args.limit = cli->vm->mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		} else
 		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.target = NV_DMA_V0_TARGET_VM;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = 0;
-			args.limit = mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
 		}
 
 		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,

+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_chan.h

@@ -16,8 +16,9 @@ struct nouveau_channel {
 
 	struct {
 		struct nouveau_bo *buffer;
-		struct nvkm_vma vma;
+		struct nouveau_vma *vma;
 		struct nvif_object ctxdma;
+		u64 addr;
 	} push;
 
 	/* TODO: this will be reworked in the near future */

+ 1 - 4
drivers/gpu/drm/nouveau/nouveau_display.h

@@ -1,14 +1,11 @@
 #ifndef __NOUVEAU_DISPLAY_H__
 #define __NOUVEAU_DISPLAY_H__
-
-#include <subdev/mmu.h>
-
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 
 
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
 	struct nouveau_bo *nvbo;
-	struct nvkm_vma vma;
+	struct nouveau_vma *vma;
 	u32 r_handle;
 	u32 r_format;
 	u32 r_pitch;

+ 8 - 7
drivers/gpu/drm/nouveau/nouveau_dma.c

@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_vmm.h"
 
 void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 			return -EBUSY;
 	}
 
-	if (val < chan->push.vma.offset ||
-	    val > chan->push.vma.offset + (chan->dma.max << 2))
+	if (val < chan->push.addr ||
+	    val > chan->push.addr + (chan->dma.max << 2))
 		return -EINVAL;
 
-	return (val - chan->push.vma.offset) >> 2;
+	return (val - chan->push.addr) >> 2;
 }
 
 void
@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 {
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 	u64 offset;
 
-	vma = nouveau_bo_vma_find(bo, cli->vm);
+	vma = nouveau_vma_find(bo, &cli->vmm);
 	BUG_ON(!vma);
-	offset = vma->offset + delta;
+	offset = vma->addr + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 
@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 			 * instruct the GPU to jump back to the start right
 			 * after processing the currently pending commands.
 			 */
-			OUT_RING(chan, chan->push.vma.offset | 0x20000000);
+			OUT_RING(chan, chan->push.addr | 0x20000000);
 
 			/* wait for GET to depart from the skips area.
 			 * prevents writing GET==PUT and causing a race
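
With push.addr cached on the channel, validating a GET pointer reported by the GPU reduces to a range check against the pushbuf window. As a standalone sketch mirroring the READ_GET() hunk above (not the patch itself):

/* Returns the dword index into the ring, or -EINVAL if GET points
 * outside the push buffer.
 */
static long example_ring_index(u64 get, u64 push_addr, int dma_max)
{
	if (get < push_addr || get > push_addr + ((u64)dma_max << 2))
		return -EINVAL;
	return (get - push_addr) >> 2;
}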

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_dma.h

@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
 #define WRITE_PUT(val) do {                                                    \
 	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
-	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
 } while (0)
 
 static inline void

+ 130 - 31
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev)
 		return nouveau_platform_name(to_platform_device(dev->dev));
 }
 
+static inline bool
+nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
+{
+	if (!dma_fence_is_signaled(fence)) {
+		if (!wait)
+			return false;
+		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+	}
+	dma_fence_put(fence);
+	return true;
+}
+
+static void
+nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
+{
+	struct nouveau_cli_work *work, *wtmp;
+	mutex_lock(&cli->lock);
+	list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
+		if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
+			list_del(&work->head);
+			work->func(work);
+		}
+	}
+	mutex_unlock(&cli->lock);
+}
+
+static void
+nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
+	schedule_work(&work->cli->work);
+}
+
+void
+nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
+		       struct nouveau_cli_work *work)
+{
+	work->fence = dma_fence_get(fence);
+	work->cli = cli;
+	mutex_lock(&cli->lock);
+	list_add_tail(&work->head, &cli->worker);
+	mutex_unlock(&cli->lock);
+	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
+		nouveau_cli_work_fence(fence, &work->cb);
+}
+
+static void
+nouveau_cli_work(struct work_struct *w)
+{
+	struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
+	nouveau_cli_work_flush(cli, false);
+}
+
 static void
 nouveau_cli_fini(struct nouveau_cli *cli)
 {
-	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
+	nouveau_cli_work_flush(cli, true);
 	usif_client_fini(cli);
+	nouveau_vmm_fini(&cli->vmm);
+	nvif_mmu_fini(&cli->mmu);
 	nvif_device_fini(&cli->device);
+	mutex_lock(&cli->drm->master.lock);
 	nvif_client_fini(&cli->base);
+	mutex_unlock(&cli->drm->master.lock);
 }
 
 static int
 nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 		 struct nouveau_cli *cli)
 {
+	static const struct nvif_mclass
+	mems[] = {
+		{ NVIF_CLASS_MEM_GF100, -1 },
+		{ NVIF_CLASS_MEM_NV50 , -1 },
+		{ NVIF_CLASS_MEM_NV04 , -1 },
+		{}
+	};
+	static const struct nvif_mclass
+	mmus[] = {
+		{ NVIF_CLASS_MMU_GF100, -1 },
+		{ NVIF_CLASS_MMU_NV50 , -1 },
+		{ NVIF_CLASS_MMU_NV04 , -1 },
+		{}
+	};
+	static const struct nvif_mclass
+	vmms[] = {
+		{ NVIF_CLASS_VMM_GP100, -1 },
+		{ NVIF_CLASS_VMM_GM200, -1 },
+		{ NVIF_CLASS_VMM_GF100, -1 },
+		{ NVIF_CLASS_VMM_NV50 , -1 },
+		{ NVIF_CLASS_VMM_NV04 , -1 },
+		{}
+	};
 	u64 device = nouveau_name(drm->dev);
 	int ret;
 
 	snprintf(cli->name, sizeof(cli->name), "%s", sname);
-	cli->dev = drm->dev;
+	cli->drm = drm;
 	mutex_init(&cli->mutex);
 	usif_client_init(cli);
 
-	if (cli == &drm->client) {
+	INIT_WORK(&cli->work, nouveau_cli_work);
+	INIT_LIST_HEAD(&cli->worker);
+	mutex_init(&cli->lock);
+
+	if (cli == &drm->master) {
 		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
 				       cli->name, device, &cli->base);
 	} else {
-		ret = nvif_client_init(&drm->client.base, cli->name, device,
+		mutex_lock(&drm->master.lock);
+		ret = nvif_client_init(&drm->master.base, cli->name, device,
 				       &cli->base);
+		mutex_unlock(&drm->master.lock);
 	}
 	if (ret) {
 		NV_ERROR(drm, "Client allocation failed: %d\n", ret);
@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 		goto done;
 	}
 
+	ret = nvif_mclass(&cli->device.object, mmus);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported MMU class\n");
+		goto done;
+	}
+
+	ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+	if (ret) {
+		NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+		goto done;
+	}
+
+	ret = nvif_mclass(&cli->mmu.object, vmms);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported VMM class\n");
+		goto done;
+	}
+
+	ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
+	if (ret) {
+		NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+		goto done;
+	}
+
+	ret = nvif_mclass(&cli->mmu.object, mems);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported MEM class\n");
+		goto done;
+	}
+
+	cli->mem = &mems[ret];
+	return 0;
 done:
 	if (ret)
 		nouveau_cli_fini(cli);
@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	dev->dev_private = drm;
 	drm->dev = dev;
 
+	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+	if (ret)
+		return ret;
+
 	ret = nouveau_cli_init(drm, "DRM", &drm->client);
 	if (ret)
 		return ret;
@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	nouveau_vga_init(drm);
 
-	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		if (!nvxx_device(&drm->client.device)->mmu) {
-			ret = -ENOSYS;
-			goto fail_device;
-		}
-
-		ret = nvkm_vm_new(nvxx_device(&drm->client.device),
-				  0, (1ULL << 40), 0x1000, NULL,
-				  &drm->client.vm);
-		if (ret)
-			goto fail_device;
-
-		nvxx_client(&drm->client.base)->vm = drm->client.vm;
-	}
-
 	ret = nouveau_ttm_init(drm);
 	if (ret)
 		goto fail_ttm;
@@ -516,8 +623,8 @@ fail_bios:
 	nouveau_ttm_fini(drm);
 fail_ttm:
 	nouveau_vga_fini(drm);
-fail_device:
 	nouveau_cli_fini(&drm->client);
+	nouveau_cli_fini(&drm->master);
 	kfree(drm);
 	return ret;
 }
@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev)
 	if (drm->hdmi_device)
 		pci_dev_put(drm->hdmi_device);
 	nouveau_cli_fini(&drm->client);
+	nouveau_cli_fini(&drm->master);
 	kfree(drm);
 }
 
@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	}
 
 	NV_DEBUG(drm, "suspending object tree...\n");
-	ret = nvif_client_suspend(&drm->client.base);
+	ret = nvif_client_suspend(&drm->master.base);
 	if (ret)
 		goto fail_client;
 
@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	NV_DEBUG(drm, "resuming object tree...\n");
-	nvif_client_resume(&drm->client.base);
+	nvif_client_resume(&drm->master.base);
 
 	NV_DEBUG(drm, "resuming fence...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	cli->base.super = false;
 
-	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
-				  (1ULL << 40), 0x1000, NULL, &cli->vm);
-		if (ret)
-			goto done;
-
-		nvxx_client(&cli->base)->vm = cli->vm;
-	}
-
 	fpriv->driver_priv = cli;
 
 	mutex_lock(&drm->client.mutex);
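
Note on the new API: the nouveau_cli_work_* functions added above replace nouveau_fence_work(). A caller embeds struct nouveau_cli_work in its own work item, fills in the func pointer, and hands the item to nouveau_cli_work_queue() together with the fence to wait on; func then runs from the client's workqueue once the fence signals (or synchronously when the client is flushed at teardown). A minimal sketch of a caller, where my_unmap_work and my_defer_unmap are illustrative names, not part of this series:

	/* Hedged sketch: my_unmap_work is a hypothetical wrapper type. */
	struct my_unmap_work {
		struct nouveau_cli_work base;	/* must be embedded */
		struct nouveau_vma *vma;
	};

	static void
	my_unmap_work_func(struct nouveau_cli_work *w)
	{
		struct my_unmap_work *work = container_of(w, typeof(*work), base);
		nouveau_vma_del(&work->vma);	/* fence has signalled by now */
		kfree(work);
	}

	static int
	my_defer_unmap(struct nouveau_cli *cli, struct dma_fence *fence,
		       struct nouveau_vma *vma)
	{
		struct my_unmap_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

		if (!work)
			return -ENOMEM;
		work->base.func = my_unmap_work_func;
		work->vma = vma;
		nouveau_cli_work_queue(cli, fence, &work->base);
		return 0;
	}

nouveau_gem_object_unmap() in nouveau_gem.c below uses exactly this pattern to defer VMA teardown until the buffer's last fence completes.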

+ 29 - 4
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -5,7 +5,7 @@
 #define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
 
 #define DRIVER_NAME		"nouveau"
-#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
 #define DRIVER_DATE		"20120801"
 
 #define DRIVER_MAJOR		1
@@ -42,6 +42,8 @@
 #include <nvif/client.h>
 #include <nvif/device.h>
 #include <nvif/ioctl.h>
+#include <nvif/mmu.h>
+#include <nvif/vmm.h>
 
 #include <drm/drmP.h>
 
@@ -61,6 +63,7 @@ struct platform_device;
 
 #include "nouveau_fence.h"
 #include "nouveau_bios.h"
+#include "nouveau_vmm.h"
 
 struct nouveau_drm_tile {
 	struct nouveau_fence *fence;
@@ -86,19 +89,37 @@ enum nouveau_drm_handle {
 
 struct nouveau_cli {
 	struct nvif_client base;
-	struct drm_device *dev;
+	struct nouveau_drm *drm;
 	struct mutex mutex;
 
 	struct nvif_device device;
+	struct nvif_mmu mmu;
+	struct nouveau_vmm vmm;
+	const struct nvif_mclass *mem;
 
-	struct nvkm_vm *vm; /*XXX*/
 	struct list_head head;
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
 	char name[32];
+
+	struct work_struct work;
+	struct list_head worker;
+	struct mutex lock;
 };
 
+struct nouveau_cli_work {
+	void (*func)(struct nouveau_cli_work *);
+	struct nouveau_cli *cli;
+	struct list_head head;
+
+	struct dma_fence *fence;
+	struct dma_fence_cb cb;
+};
+
+void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
+			    struct nouveau_cli_work *);
+
 static inline struct nouveau_cli *
 nouveau_cli(struct drm_file *fpriv)
 {
@@ -109,6 +130,7 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/device.h>
 
 struct nouveau_drm {
+	struct nouveau_cli master;
 	struct nouveau_cli client;
 	struct drm_device *dev;
 
@@ -133,6 +155,9 @@ struct nouveau_drm {
 		struct nouveau_channel *chan;
 		struct nvif_object copy;
 		int mtrr;
+		int type_vram;
+		int type_host;
+		int type_ncoh;
 	} ttm;
 
 	/* GEM interface support */
@@ -204,7 +229,7 @@ void nouveau_drm_device_remove(struct drm_device *dev);
 
 #define NV_PRINTK(l,c,f,a...) do {                                             \
 	struct nouveau_cli *_cli = (c);                                        \
-	dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a);                     \
+	dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a);                \
 } while(0)
 #define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
 #define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)

+ 4 - 3
drivers/gpu/drm/nouveau/nouveau_fbcon.c

@@ -48,6 +48,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_bo.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_chan.h"
 #include "nouveau_chan.h"
+#include "nouveau_vmm.h"
 
 
 #include "nouveau_crtc.h"
 #include "nouveau_crtc.h"
 
 
@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 	chan = nouveau_nofbaccel ? NULL : drm->channel;
 	if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
+		ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
 		if (ret) {
 			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
 			chan = NULL;
@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 out_unlock:
 	if (chan)
-		nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+		nouveau_vma_del(&fb->vma);
 	nouveau_bo_unmap(fb->nvbo);
 out_unpin:
 	nouveau_bo_unpin(fb->nvbo);
@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 	drm_fb_helper_fini(&fbcon->helper);
 
 	if (nouveau_fb->nvbo) {
-		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+		nouveau_vma_del(&nouveau_fb->vma);
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
 		drm_framebuffer_unreference(&nouveau_fb->base);

+ 0 - 58
drivers/gpu/drm/nouveau/nouveau_fence.c

@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 	WARN_ON(ret);
 }
 
-struct nouveau_fence_work {
-	struct work_struct work;
-	struct dma_fence_cb cb;
-	void (*func)(void *);
-	void *data;
-};
-
-static void
-nouveau_fence_work_handler(struct work_struct *kwork)
-{
-	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
-	work->func(work->data);
-	kfree(work);
-}
-
-static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
-	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
-
-	schedule_work(&work->work);
-}
-
-void
-nouveau_fence_work(struct dma_fence *fence,
-		   void (*func)(void *), void *data)
-{
-	struct nouveau_fence_work *work;
-
-	if (dma_fence_is_signaled(fence))
-		goto err;
-
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work) {
-		/*
-		 * this might not be a nouveau fence any more,
-		 * so force a lazy wait here
-		 */
-		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
-					   true, false));
-		goto err;
-	}
-
-	INIT_WORK(&work->work, nouveau_fence_work_handler);
-	work->func = func;
-	work->data = data;
-
-	if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
-		goto err_free;
-	return;
-
-err_free:
-	kfree(work);
-err:
-	func(data);
-}
-
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
-	fence->sysmem = sysmem;
-
 	ret = nouveau_fence_emit(fence, chan);
 	if (ret)
 		nouveau_fence_unref(&fence);

+ 1 - 6
drivers/gpu/drm/nouveau/nouveau_fence.h

@@ -12,8 +12,6 @@ struct nouveau_fence {
 
 	struct list_head head;
 
-	bool sysmem;
-
 	struct nouveau_channel __rcu *channel;
 	unsigned long timeout;
 };
@@ -24,7 +22,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
@@ -90,14 +87,12 @@ int nouveau_flip_complete(struct nvif_notify *);
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
-	struct nvkm_vma vma;
-	struct nvkm_vma vma_gart;
+	struct nouveau_vma *vma;
 };
 
 struct nv84_fence_priv {
 	struct nouveau_fence_priv base;
 	struct nouveau_bo *bo;
-	struct nouveau_bo *bo_gart;
 	u32 *suspend;
 	struct mutex mutex;
 };

+ 64 - 59
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -31,6 +31,10 @@
 
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
 
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nvkm_vma *vma;
 	struct device *dev = drm->dev->dev;
+	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->vm)
+	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->vm);
-	if (!vma) {
-		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-		if (!vma) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		ret = pm_runtime_get_sync(dev);
-		if (ret < 0 && ret != -EACCES) {
-			kfree(vma);
-			goto out;
-		}
-
-		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
-		if (ret)
-			kfree(vma);
-
-		pm_runtime_mark_last_busy(dev);
-		pm_runtime_put_autosuspend(dev);
-	} else {
-		vma->refcount++;
-	}
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0 && ret != -EACCES)
+		goto out;
 
+	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
 out:
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
 }
 
+struct nouveau_gem_object_unmap {
+	struct nouveau_cli_work work;
+	struct nouveau_vma *vma;
+};
+
 static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
-	struct nvkm_vma *vma = data;
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-	kfree(vma);
+	nouveau_vma_del(&vma);
 }
 
 static void
-nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+	struct nouveau_gem_object_unmap *work =
+		container_of(w, typeof(*work), work);
+	nouveau_gem_object_delete(work->vma);
+	kfree(work);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
+	struct nouveau_gem_object_unmap *work;
 	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
 
-	list_del(&vma->head);
+	list_del_init(&vma->head);
 
 	if (fobj && fobj->shared_count > 1)
 		ttm_bo_wait(&nvbo->bo, false, false);
@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	else
 		fence = reservation_object_get_excl(nvbo->bo.resv);
 
-	if (fence && mapped) {
-		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
-	} else {
-		if (mapped)
-			nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-		kfree(vma);
+	if (!fence || !mapped) {
+		nouveau_gem_object_delete(vma);
+		return;
+	}
+
+	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+		nouveau_gem_object_delete(vma);
+		return;
 	}
+
+	work->work.func = nouveau_gem_object_delete_work;
+	work->vma = vma;
+	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 }
 
 void
@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct device *dev = drm->dev->dev;
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->vm)
+	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->vm);
+	vma = nouveau_vma_find(nvbo, &cli->vmm);
 	if (vma) {
-		if (--vma->refcount == 0) {
+		if (--vma->refs == 0) {
 			ret = pm_runtime_get_sync(dev);
 			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 				nouveau_gem_object_unmap(nvbo, vma);
@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(cli->dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
-	struct nvkm_vma *vma;
+	struct nouveau_vma *vma;
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 	rep->offset = nvbo->bo.offset;
-	if (cli->vm) {
-		vma = nouveau_bo_vma_find(nvbo, cli->vm);
+	if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+		vma = nouveau_vma_find(nvbo, &cli->vmm);
 		if (!vma)
 			return -EINVAL;
 
-		rep->offset = vma->offset;
+		rep->offset = vma->addr;
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
-	rep->tile_mode = nvbo->tile_mode;
-	rep->tile_flags = nvbo->tile_flags;
+	rep->tile_mode = nvbo->mode;
+	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+		rep->tile_flags |= nvbo->kind << 8;
+	else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+	else
+		rep->tile_flags |= nvbo->zeta;
 	return 0;
 }
 
@@ -255,18 +267,11 @@ int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
-		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
-		return -EINVAL;
-	}
-
 	ret = nouveau_gem_new(cli, req->info.size, req->align,
 			      req->info.domain, req->info.tile_mode,
 			      req->info.tile_flags, &nvbo);
@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				bo[push[i].bo_index].user_priv;
 			uint32_t cmd;
 
-			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 			cmd |= 0x20000000;
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
@@ -843,7 +848,7 @@ out_next:
 		req->suffix1 = 0x00000000;
 	} else {
 		req->suffix0 = 0x20000000 |
-			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
+			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 		req->suffix1 = 0x00000000;
 	}
 

+ 0 - 3
drivers/gpu/drm/nouveau/nouveau_gem.h

@@ -6,9 +6,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
 #include "nouveau_bo.h"
 
 
-#define nouveau_bo_tile_layout(nvbo)				\
-	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
 static inline struct nouveau_bo *
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 nouveau_gem_object(struct drm_gem_object *gem)
 {
 {

+ 198 - 0
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -0,0 +1,198 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_mem.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+
+int
+nouveau_mem_map(struct nouveau_mem *mem,
+		struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+	union {
+		struct nv50_vmm_map_v0 nv50;
+		struct gf100_vmm_map_v0 gf100;
+	} args;
+	u32 argc = 0;
+	bool super;
+	int ret;
+
+	switch (vmm->object.oclass) {
+	case NVIF_CLASS_VMM_NV04:
+		break;
+	case NVIF_CLASS_VMM_NV50:
+		args.nv50.version = 0;
+		args.nv50.ro = 0;
+		args.nv50.priv = 0;
+		args.nv50.kind = mem->kind;
+		args.nv50.comp = mem->comp;
+		argc = sizeof(args.nv50);
+		break;
+	case NVIF_CLASS_VMM_GF100:
+	case NVIF_CLASS_VMM_GM200:
+	case NVIF_CLASS_VMM_GP100:
+		args.gf100.version = 0;
+		if (mem->mem.type & NVIF_MEM_VRAM)
+			args.gf100.vol = 0;
+		else
+			args.gf100.vol = 1;
+		args.gf100.ro = 0;
+		args.gf100.priv = 0;
+		args.gf100.kind = mem->kind;
+		argc = sizeof(args.gf100);
+		break;
+	default:
+		WARN_ON(1);
+		return -ENOSYS;
+	}
+
+	super = vmm->object.client->super;
+	vmm->object.client->super = true;
+	ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
+			   &mem->mem, 0);
+	vmm->object.client->super = super;
+	return ret;
+}
+
+void
+nouveau_mem_fini(struct nouveau_mem *mem)
+{
+	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
+	nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
+	mutex_lock(&mem->cli->drm->master.lock);
+	nvif_mem_fini(&mem->mem);
+	mutex_unlock(&mem->cli->drm->master.lock);
+}
+
+int
+nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	struct nouveau_cli *cli = mem->cli;
+	struct nouveau_drm *drm = cli->drm;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_mem_ram_v0 args = {};
+	bool super = cli->base.super;
+	u8 type;
+	int ret;
+
+	if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+		type = drm->ttm.type_ncoh;
+	else
+		type = drm->ttm.type_host;
+
+	if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
+		mem->comp = mem->kind = 0;
+	if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
+		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+			mem->kind = mmu->kind[mem->kind];
+		mem->comp = 0;
+	}
+
+	if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
+	else            args.dma = tt->dma_address;
+
+	mutex_lock(&drm->master.lock);
+	cli->base.super = true;
+	ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+				 reg->num_pages << PAGE_SHIFT,
+				 &args, sizeof(args), &mem->mem);
+	cli->base.super = super;
+	mutex_unlock(&drm->master.lock);
+	return ret;
+}
+
+int
+nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	struct nouveau_cli *cli = mem->cli;
+	struct nouveau_drm *drm = cli->drm;
+	struct nvif_mmu *mmu = &cli->mmu;
+	bool super = cli->base.super;
+	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+	int ret;
+
+	mutex_lock(&drm->master.lock);
+	cli->base.super = true;
+	switch (cli->mem->oclass) {
+	case NVIF_CLASS_MEM_GF100:
+		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+					 drm->ttm.type_vram, page, size,
+					 &(struct gf100_mem_v0) {
+						.contig = contig,
+					 }, sizeof(struct gf100_mem_v0),
+					 &mem->mem);
+		break;
+	case NVIF_CLASS_MEM_NV50:
+		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+					 drm->ttm.type_vram, page, size,
+					 &(struct nv50_mem_v0) {
+						.bankswz = mmu->kind[mem->kind] == 2,
+						.contig = contig,
+					 }, sizeof(struct nv50_mem_v0),
+					 &mem->mem);
+		break;
+	default:
+		ret = -ENOSYS;
+		WARN_ON(1);
+		break;
+	}
+	cli->base.super = super;
+	mutex_unlock(&drm->master.lock);
+
+	reg->start = mem->mem.addr >> PAGE_SHIFT;
+	return ret;
+}
+
+void
+nouveau_mem_del(struct ttm_mem_reg *reg)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	nouveau_mem_fini(mem);
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
+}
+
+int
+nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+		struct ttm_mem_reg *reg)
+{
+	struct nouveau_mem *mem;
+
+	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+		return -ENOMEM;
+	mem->cli = cli;
+	mem->kind = kind;
+	mem->comp = comp;
+
+	reg->mm_node = mem;
+	return 0;
+}

+ 30 - 0
drivers/gpu/drm/nouveau/nouveau_mem.h

@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_MEM_H__
+#define __NOUVEAU_MEM_H__
+#include <drm/ttm/ttm_bo_api.h>
+struct ttm_dma_tt;
+
+#include <nvif/mem.h>
+#include <nvif/vmm.h>
+
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_mem_reg *reg)
+{
+	return reg->mm_node;
+}
+
+struct nouveau_mem {
+	struct nouveau_cli *cli;
+	u8 kind;
+	u8 comp;
+	struct nvif_mem mem;
+	struct nvif_vma vma[2];
+};
+
+int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+		    struct ttm_mem_reg *);
+void nouveau_mem_del(struct ttm_mem_reg *);
+int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+void nouveau_mem_fini(struct nouveau_mem *);
+int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+#endif
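
The declarations above give the TTM backends a uniform flow: nouveau_mem_new() attaches a nouveau_mem to the ttm_mem_reg, nouveau_mem_vram() or nouveau_mem_host() backs it with an nvif_mem allocation, and nouveau_mem_map() maps it into a VMM. A condensed sketch of the VRAM path, mirroring nouveau_vram_manager_new() in nouveau_ttm.c (the -ENOSPC special case is trimmed for brevity; example_vram_alloc is an illustrative name, not a function in this series):

	/* Hedged sketch of the VRAM allocation flow used by the TTM manager. */
	static int
	example_vram_alloc(struct nouveau_cli *cli, struct nouveau_bo *nvbo,
			   struct ttm_mem_reg *reg)
	{
		int ret;

		ret = nouveau_mem_new(cli, nvbo->kind, nvbo->comp, reg);
		if (ret)
			return ret;	/* reg->mm_node now holds a nouveau_mem */

		ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
		if (ret)
			nouveau_mem_del(reg);	/* tears reg->mm_node back down */
		return ret;
	}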

+ 23 - 31
drivers/gpu/drm/nouveau/nouveau_sgdma.c

@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 
 #include "nouveau_drv.h"
+#include "nouveau_mem.h"
 #include "nouveau_ttm.h"
 
 struct nouveau_sgdma_be {
@@ -9,7 +10,7 @@ struct nouveau_sgdma_be {
 	 * nouve_bo.c works properly, otherwise have to move them here
 	 */
 	struct ttm_dma_tt ttm;
-	struct nvkm_mem *node;
+	struct nouveau_mem *mem;
 };
 
 static void
@@ -27,19 +28,20 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = reg->mm_node;
-
-	if (ttm->sg) {
-		node->sg    = ttm->sg;
-		node->pages = NULL;
-	} else {
-		node->sg    = NULL;
-		node->pages = nvbe->ttm.dma_address;
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
+
+	ret = nouveau_mem_host(reg, &nvbe->ttm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+	if (ret) {
+		nouveau_mem_fini(mem);
+		return ret;
 	}
-	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
-	nvkm_vm_map(&node->vma[0], node);
-	nvbe->node = node;
+	nvbe->mem = mem;
 	return 0;
 }
 
@@ -47,7 +49,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	nvkm_vm_unmap(&nvbe->node->vma[0]);
+	nouveau_mem_fini(nvbe->mem);
 	return 0;
 }
 
@@ -61,30 +63,20 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = reg->mm_node;
-
-	/* noop: bound in move_notify() */
-	if (ttm->sg) {
-		node->sg    = ttm->sg;
-		node->pages = NULL;
-	} else {
-		node->sg    = NULL;
-		node->pages = nvbe->ttm.dma_address;
-	}
-	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
-	return 0;
-}
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
 
-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
-	/* noop: unbound in move_notify() */
+	ret = nouveau_mem_host(reg, &nvbe->ttm);
+	if (ret)
+		return ret;
+
+	nvbe->mem = mem;
 	return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
 	.bind			= nv50_sgdma_bind,
-	.unbind			= nv50_sgdma_unbind,
+	.unbind			= nv04_sgdma_unbind,
 	.destroy		= nouveau_sgdma_destroy
 };
 

+ 88 - 192
drivers/gpu/drm/nouveau/nouveau_ttm.c

@@ -23,53 +23,37 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include "nouveau_drv.h"
-#include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"
 
 #include <drm/drm_legacy.h>
 
 #include <core/tegra.h>
 
 static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
-	man->priv = fb;
 	return 0;
 }
 
 static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+nouveau_manager_fini(struct ttm_mem_type_manager *man)
 {
-	man->priv = NULL;
 	return 0;
 }
 
-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
+static void
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-	if (node->vma[0].node) {
-		nvkm_vm_unmap(&node->vma[0]);
-		nvkm_vm_put(&node->vma[0]);
-	}
-
-	if (node->vma[1].node) {
-		nvkm_vm_unmap(&node->vma[1]);
-		nvkm_vm_put(&node->vma[1]);
-	}
+	nouveau_mem_del(reg);
 }
 
 static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *reg)
+nouveau_manager_debug(struct ttm_mem_type_manager *man,
+		      struct drm_printer *printer)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-	nvkm_mem_node_cleanup(reg->mm_node);
-	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *reg)
 {
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_mem *node;
-	u32 size_nc = 0;
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
 	int ret;
 
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->page_shift;
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
-			     reg->page_alignment << PAGE_SHIFT, size_nc,
-			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
-		reg->mm_node = NULL;
-		return (ret == -ENOSPC) ? 0 : ret;
+		nouveau_mem_del(reg);
+		if (ret == -ENOSPC) {
+			reg->mm_node = NULL;
+			return 0;
+		}
+		return ret;
 	}
 
-	node->page_shift = nvbo->page_shift;
-
-	reg->mm_node = node;
-	reg->start   = node->offset >> PAGE_SHIFT;
 	return 0;
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-	.init = nouveau_vram_manager_init,
-	.takedown = nouveau_vram_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nouveau_vram_manager_new,
-	.put_node = nouveau_vram_manager_del,
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug,
 };
 
-static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-	return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *reg)
-{
-	nvkm_mem_node_cleanup(reg->mm_node);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
-}
-
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *reg)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_mem *node;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
+	int ret;
 
-	node->page_shift = 12;
-
-	switch (drm->client.device.info.family) {
-	case NV_DEVICE_INFO_V0_TNT:
-	case NV_DEVICE_INFO_V0_CELSIUS:
-	case NV_DEVICE_INFO_V0_KELVIN:
-	case NV_DEVICE_INFO_V0_RANKINE:
-	case NV_DEVICE_INFO_V0_CURIE:
-		break;
-	case NV_DEVICE_INFO_V0_TESLA:
-		if (drm->client.device.info.chipset != 0x50)
-			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
-		break;
-	case NV_DEVICE_INFO_V0_FERMI:
-	case NV_DEVICE_INFO_V0_KEPLER:
-	case NV_DEVICE_INFO_V0_MAXWELL:
-	case NV_DEVICE_INFO_V0_PASCAL:
-		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
-		break;
-	default:
-		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-			drm->client.device.info.family);
-		break;
-	}
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	reg->mm_node = node;
-	reg->start   = 0;
+	reg->start = 0;
 	return 0;
 }
 
-static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
-			   struct drm_printer *printer)
-{
-}
-
 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-	.init = nouveau_gart_manager_init,
-	.takedown = nouveau_gart_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nouveau_gart_manager_new,
-	.put_node = nouveau_gart_manager_del,
-	.debug = nouveau_gart_manager_debug
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug
 };
 
-/*XXX*/
-#include <subdev/mmu/nv04.h>
-static int
-nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
-	struct nv04_mmu *priv = (void *)mmu;
-	struct nvkm_vm *vm = NULL;
-	nvkm_vm_ref(priv->vm, &vm, NULL);
-	man->priv = vm;
-	return 0;
-}
-
-static int
-nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-	struct nvkm_vm *vm = man->priv;
-	nvkm_vm_ref(NULL, &vm, NULL);
-	man->priv = NULL;
-	return 0;
-}
-
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
-	struct nvkm_mem *node = reg->mm_node;
-	if (node->vma[0].node)
-		nvkm_vm_put(&node->vma[0]);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
-}
-
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
 		      struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_drm *drm = nvbo->cli->drm;
+	struct nouveau_mem *mem;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
-	node->page_shift = 12;
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	mem = nouveau_mem(reg);
+	if (ret)
+		return ret;
 
-	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
-			  NV_MEM_ACCESS_RW, &node->vma[0]);
+	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		kfree(node);
+		nouveau_mem_del(reg);
+		if (ret == -ENOSPC) {
+			reg->mm_node = NULL;
+			return 0;
+		}
 		return ret;
 	}
 
-	reg->mm_node = node;
-	reg->start   = node->vma[0].offset >> PAGE_SHIFT;
+	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
-static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
-			struct drm_printer *printer)
-{
-}
-
 const struct ttm_mem_type_manager_func nv04_gart_manager = {
-	.init = nv04_gart_manager_init,
-	.takedown = nv04_gart_manager_fini,
+	.init = nouveau_manager_init,
+	.takedown = nouveau_manager_fini,
 	.get_node = nv04_gart_manager_new,
-	.put_node = nv04_gart_manager_del,
-	.debug = nv04_gart_manager_debug
+	.put_node = nouveau_manager_del,
+	.debug = nouveau_manager_debug
 };
 
 int
@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 {
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nvkm_pci *pci = device->pci;
+	struct nvif_mmu *mmu = &drm->client.mmu;
 	struct drm_device *dev = drm->dev;
-	u8 bits;
-	int ret;
+	int typei, ret;
 
-	if (pci && pci->agp.bridge) {
-		drm->agp.bridge = pci->agp.bridge;
-		drm->agp.base = pci->agp.base;
-		drm->agp.size = pci->agp.size;
-		drm->agp.cma = pci->agp.cma;
-	}
+	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+						   NVIF_MEM_COHERENT);
+	if (typei < 0)
+		return -ENOSYS;
 
-	bits = nvxx_mmu(&drm->client.device)->dma_bits;
-	if (nvxx_device(&drm->client.device)->func->pci) {
-		if (drm->agp.bridge)
-			bits = 32;
-	} else if (device->func->tegra) {
-		struct nvkm_device_tegra *tegra = device->func->tegra(device);
+	drm->ttm.type_host = typei;
 
-		/*
-		 * If the platform can use a IOMMU, then the addressable DMA
-		 * space is constrained by the IOMMU bit
-		 */
-		if (tegra->func->iommu_bit)
-			bits = min(bits, tegra->func->iommu_bit);
+	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+	if (typei < 0)
+		return -ENOSYS;
 
-	}
+	drm->ttm.type_ncoh = typei;
 
-	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret && bits != 32) {
-		bits = 32;
-		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
+	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
+	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
+					   NVIF_MEM_KIND |
+					   NVIF_MEM_COMP |
+					   NVIF_MEM_DISP);
+		if (typei < 0)
+			return -ENOSYS;
+
+		drm->ttm.type_vram = typei;
+	} else {
+		drm->ttm.type_vram = -1;
 	}
-	if (ret)
-		return ret;
 
-	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret)
-		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
+	if (pci && pci->agp.bridge) {
+		drm->agp.bridge = pci->agp.bridge;
+		drm->agp.base = pci->agp.base;
+		drm->agp.size = pci->agp.size;
+		drm->agp.cma = pci->agp.cma;
+	}
 
 	ret = nouveau_ttm_global_init(drm);
 	if (ret)
@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 				  &nouveau_bo_driver,
 				  dev->anon_inode->i_mapping,
 				  DRM_FILE_PAGE_OFFSET,
-				  bits <= 32 ? true : false);
+				  drm->client.mmu.dmabits <= 32 ? true : false);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;
@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
 	/* GART init */
 	if (!drm->agp.bridge) {
-		drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
+		drm->gem.gart_available = drm->client.vmm.vmm.limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
 	}
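
nouveau_ttm_init() now derives TTM memory-type indices from the client's MMU object instead of hard-coded family checks: nvif_mmu_type() returns the index of the first type whose capability bits are a superset of the request, or a negative value if none matches, so probing twice (with and without NVIF_MEM_COHERENT) yields separate coherent and non-coherent host types. A condensed restatement of the probing above, assuming an initialised struct nvif_mmu (example_pick_host_types is an illustrative name):

	/* Hedged sketch: how the host memory-type indices are chosen. */
	static int
	example_pick_host_types(struct nouveau_drm *drm, struct nvif_mmu *mmu)
	{
		int host = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
					      NVIF_MEM_COHERENT);
		int ncoh = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);

		if (host < 0 || ncoh < 0)
			return -ENOSYS;		/* no usable host memory type */

		drm->ttm.type_host = host;	/* coherent, CPU-mappable */
		drm->ttm.type_ncoh = ncoh;	/* non-coherent fallback */
		return 0;
	}

nouveau_mem_host() in nouveau_mem.c then picks between the two indices per allocation, depending on whether the coherent type is flagged NVIF_MEM_UNCACHED.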

+ 135 - 0
drivers/gpu/drm/nouveau/nouveau_vmm.c

@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_vmm.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+#include "nouveau_mem.h"
+
+void
+nouveau_vma_unmap(struct nouveau_vma *vma)
+{
+	if (vma->mem) {
+		nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
+		vma->mem = NULL;
+	}
+}
+
+int
+nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
+{
+	struct nvif_vma tmp = { .addr = vma->addr };
+	int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
+	if (ret)
+		return ret;
+	vma->mem = mem;
+	return 0;
+}
+
+struct nouveau_vma *
+nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
+{
+	struct nouveau_vma *vma;
+
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vmm == vmm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+void
+nouveau_vma_del(struct nouveau_vma **pvma)
+{
+	struct nouveau_vma *vma = *pvma;
+	if (vma && --vma->refs <= 0) {
+		if (likely(vma->addr != ~0ULL)) {
+			struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
+			nvif_vmm_put(&vma->vmm->vmm, &tmp);
+		}
+		list_del(&vma->head);
+		*pvma = NULL;
+		kfree(*pvma);
+	}
+}
+
+int
+nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+		struct nouveau_vma **pvma)
+{
+	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_vma *vma;
+	struct nvif_vma tmp;
+	int ret;
+
+	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
+		vma->refs++;
+		return 0;
+	}
+
+	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
+		return -ENOMEM;
+	vma->vmm = vmm;
+	vma->refs = 1;
+	vma->addr = ~0ULL;
+	vma->mem = NULL;
+	list_add_tail(&vma->head, &nvbo->vma_list);
+
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
+				   mem->mem.size, &tmp);
+		if (ret)
+			goto done;
+
+		vma->addr = tmp.addr;
+		ret = nouveau_vma_map(vma, mem);
+	} else {
+		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+				   mem->mem.size, &tmp);
+		vma->addr = tmp.addr;
+	}
+
+done:
+	if (ret)
+		nouveau_vma_del(pvma);
+	return ret;
+}
+
+void
+nouveau_vmm_fini(struct nouveau_vmm *vmm)
+{
+	nvif_vmm_fini(&vmm->vmm);
+	vmm->cli = NULL;
+}
+
+int
+nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
+{
+	int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
+				&vmm->vmm);
+	if (ret)
+		return ret;
+
+	vmm->cli = cli;
+	return 0;
+}

+ 31 - 0
drivers/gpu/drm/nouveau/nouveau_vmm.h

@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_VMA_H__
+#define __NOUVEAU_VMA_H__
+#include <nvif/vmm.h>
+struct nouveau_bo;
+struct nouveau_mem;
+
+struct nouveau_vma {
+	struct nouveau_vmm *vmm;
+	int refs;
+	struct list_head head;
+	u64 addr;
+
+	struct nouveau_mem *mem;
+};
+
+struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
+int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
+		    struct nouveau_vma **);
+void nouveau_vma_del(struct nouveau_vma **);
+int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vma_unmap(struct nouveau_vma *);
+
+struct nouveau_vmm {
+	struct nouveau_cli *cli;
+	struct nvif_vmm vmm;
+	struct nvkm_vm *vm;
+};
+
+int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
+void nouveau_vmm_fini(struct nouveau_vmm *);
+#endif
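
nouveau_vma mappings are refcounted per (buffer, VMM) pair: nouveau_vma_new() returns the existing mapping with refs bumped when one already exists, and nouveau_vma_del() only releases the address range and list entry once refs reaches zero. A short usage sketch, assuming the caller has already reserved the buffer object (illustration only — a real user keeps the reference while the address is in use; example_vma_use is a hypothetical name):

	/* Hedged sketch: map a bo into a client's VMM and read its GPU VA. */
	static int
	example_vma_use(struct nouveau_bo *nvbo, struct nouveau_cli *cli,
			u64 *gpuva)
	{
		struct nouveau_vma *vma;
		int ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);

		if (ret)
			return ret;
		*gpuva = vma->addr;	/* GPU virtual address of the buffer */
		nouveau_vma_del(&vma);	/* refs--; freed on the last put */
		return 0;
	}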

+ 6 - 6
drivers/gpu/drm/nouveau/nv50_display.c

@@ -318,7 +318,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
 				ret = nvif_object_init(disp, 0, oclass[0],
 						       data, size, &chan->user);
 				if (ret == 0)
-					nvif_object_map(&chan->user);
+					nvif_object_map(&chan->user, NULL, 0);
 				nvif_object_sclass_put(&sclass);
 				return ret;
 			}
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 {
 	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
 	struct nv50_dmac_ctxdma *ctxdma;
-	const u8    kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	const u8    kind = fb->nvbo->kind;
 	const u32 handle = 0xfb000000 | kind;
 	struct {
 		struct nv_dma_v0 base;
@@ -510,6 +510,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	int ret;
 
 	mutex_init(&dmac->lock);
+	INIT_LIST_HEAD(&dmac->ctxdma);
 
 	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
 				       &dmac->handle, GFP_KERNEL);
@@ -556,7 +557,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&dmac->ctxdma);
 	return ret;
 }
 
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 
 	asyw->image.w = fb->base.width;
 	asyw->image.h = fb->base.height;
-	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	asyw->image.kind = fb->nvbo->kind;
 
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->tile_mode >> 4;
+			asyw->image.block = fb->nvbo->mode >> 4;
 		else
-			asyw->image.block = fb->nvbo->tile_mode;
+			asyw->image.block = fb->nvbo->mode;
 		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;

+ 5 - 4
drivers/gpu/drm/nouveau/nv50_fbcon.c

@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 
 int
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING(chan);
 
 	return 0;

+ 5 - 38
drivers/gpu/drm/nouveau/nv84_fence.c

@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
+#include "nouveau_vmm.h"
 
 #include "nv50_display.h"
 
@@ -68,12 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = chan->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + chan->chid * 16;
 
 	return fctx->base.emit32(chan, addr, fence->base.seqno);
 }
@@ -83,12 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = prev->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + prev->chid * 16;
 
 	return fctx->base.sync32(chan, addr, fence->base.seqno);
 }
@@ -108,8 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
 	mutex_lock(&priv->mutex);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	nouveau_vma_del(&fctx->vma);
 	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
@@ -137,11 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sequence = nv84_fence_read(chan);
 
 	mutex_lock(&priv->mutex);
-	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
-	if (ret == 0) {
-		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
-					&fctx->vma_gart);
-	}
+	ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
 	mutex_unlock(&priv->mutex);
 
 	if (ret)
@@ -182,10 +168,6 @@ static void
 nv84_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv84_fence_priv *priv = drm->fence;
-	nouveau_bo_unmap(priv->bo_gart);
-	if (priv->bo_gart)
-		nouveau_bo_unpin(priv->bo_gart);
-	nouveau_bo_ref(NULL, &priv->bo_gart);
 	nouveau_bo_unmap(priv->bo);
 	if (priv->bo)
 		nouveau_bo_unpin(priv->bo);
@@ -238,21 +220,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0)
-		ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
-				     0, NULL, NULL, &priv->bo_gart);
-	if (ret == 0) {
-		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
-		if (ret == 0) {
-			ret = nouveau_bo_map(priv->bo_gart);
-			if (ret)
-				nouveau_bo_unpin(priv->bo_gart);
-		}
-		if (ret)
-			nouveau_bo_ref(NULL, &priv->bo_gart);
-	}
-
 	if (ret)
 		nv84_fence_destroy(drm);
 	return ret;

+ 5 - 4
drivers/gpu/drm/nouveau/nvc0_fbcon.c

@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING  (chan, info->fix.line_length);
 	OUT_RING  (chan, info->var.xres_virtual);
 	OUT_RING  (chan, info->var.yres_virtual);
-	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
 	OUT_RING  (chan, format);
 	OUT_RING  (chan, 1);
@@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING  (chan, info->fix.line_length);
 	OUT_RING  (chan, info->var.xres_virtual);
 	OUT_RING  (chan, info->var.yres_virtual);
-	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING  (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING  (chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING (chan);
 
 	return 0;

+ 3 - 0
drivers/gpu/drm/nouveau/nvif/Kbuild

@@ -2,4 +2,7 @@ nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
 nvif-y += nvif/driver.o
+nvif-y += nvif/mem.o
+nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o

+ 88 - 0
drivers/gpu/drm/nouveau/nvif/mem.c

@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mem.h>
+#include <nvif/client.h>
+
+#include <nvif/if000a.h>
+
+void
+nvif_mem_fini(struct nvif_mem *mem)
+{
+	nvif_object_fini(&mem->object);
+}
+
+int
+nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+		   u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+	struct nvif_mem_v0 *args;
+	u8 stack[128];
+	int ret;
+
+	mem->object.client = NULL;
+	if (type < 0)
+		return -EINVAL;
+
+	if (sizeof(*args) + argc > sizeof(stack)) {
+		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+			return -ENOMEM;
+	} else {
+		args = (void *)stack;
+	}
+	args->version = 0;
+	args->type = type;
+	args->page = page;
+	args->size = size;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_init(&mmu->object, 0, oclass, args,
+			       sizeof(*args) + argc, &mem->object);
+	if (ret == 0) {
+		mem->type = mmu->type[type].type;
+		mem->page = args->page;
+		mem->addr = args->addr;
+		mem->size = args->size;
+	}
+
+	if (args != (void *)stack)
+		kfree(args);
+	return ret;
+
+}
+
+int
+nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+	      u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+	int ret = -EINVAL, i;
+
+	mem->object.client = NULL;
+
+	for (i = 0; ret && i < mmu->type_nr; i++) {
+		if ((mmu->type[i].type & type) == type) {
+			ret = nvif_mem_init_type(mmu, oclass, i, page, size,
+						 argv, argc, mem);
+		}
+	}
+
+	return ret;
+}

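The nvif_mem object is the client-visible handle for the new allocator: instead of TTM placement flags, callers request a combination of NVIF_MEM_* type flags, and nvif_mem_init() walks the MMU's type list for the first index whose flags are a superset of the request. A minimal sketch of an allocation, assuming an already-initialised nvif_mmu and the NVIF_CLASS_MEM_GF100 class name from the new headers:

	struct nvif_mem mem;
	int ret;

	/* Ask for CPU-mappable, coherent system memory; page = 0 is
	 * assumed here to mean "no page-size preference".
	 */
	ret = nvif_mem_init(&mmu, NVIF_CLASS_MEM_GF100,
			    NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
			    NVIF_MEM_COHERENT, 0, 1 << 20, NULL, 0, &mem);
	if (ret == 0)
		nvif_mem_fini(&mem);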
+ 117 - 0
drivers/gpu/drm/nouveau/nvif/mmu.c

@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mmu.h>
+
+#include <nvif/class.h>
+#include <nvif/if0008.h>
+
+void
+nvif_mmu_fini(struct nvif_mmu *mmu)
+{
+	kfree(mmu->kind);
+	kfree(mmu->type);
+	kfree(mmu->heap);
+	nvif_object_fini(&mmu->object);
+}
+
+int
+nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+{
+	struct nvif_mmu_v0 args;
+	int ret, i;
+
+	args.version = 0;
+	mmu->heap = NULL;
+	mmu->type = NULL;
+	mmu->kind = NULL;
+
+	ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
+			       &mmu->object);
+	if (ret)
+		goto done;
+
+	mmu->dmabits = args.dmabits;
+	mmu->heap_nr = args.heap_nr;
+	mmu->type_nr = args.type_nr;
+	mmu->kind_nr = args.kind_nr;
+
+	mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
+	mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+	if (ret = -ENOMEM, !mmu->heap || !mmu->type)
+		goto done;
+
+	mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+	if (!mmu->kind && mmu->kind_nr)
+		goto done;
+
+	for (i = 0; i < mmu->heap_nr; i++) {
+		struct nvif_mmu_heap_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
+				       &args, sizeof(args));
+		if (ret)
+			goto done;
+
+		mmu->heap[i].size = args.size;
+	}
+
+	for (i = 0; i < mmu->type_nr; i++) {
+		struct nvif_mmu_type_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
+				       &args, sizeof(args));
+		if (ret)
+			goto done;
+
+		mmu->type[i].type = 0;
+		if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
+		if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
+		if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
+		if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
+		if (args.kind    ) mmu->type[i].type |= NVIF_MEM_KIND;
+		if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
+		if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
+		if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
+		mmu->type[i].heap = args.heap;
+	}
+
+	if (mmu->kind_nr) {
+		struct nvif_mmu_kind_v0 *kind;
+		u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+
+		if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
+			goto done;
+		kind->version = 0;
+		kind->count = mmu->kind_nr;
+
+		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
+				       kind, argc);
+		if (ret == 0)
+			memcpy(mmu->kind, kind->data, kind->count);
+		kfree(kind);
+	}
+
+done:
+	if (ret)
+		nvif_mmu_fini(mmu);
+	return ret;
+}

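nvif_mmu_init() snapshots the heap, type and kind tables exported by the kernel's MMU object, so a client can choose memory types without a round-trip per allocation. A hedged sketch of probing the type table, assuming the NVIF_CLASS_MMU_GF100 class name and an initialised device object:

	struct nvif_mmu mmu;
	int ret, i, type = -1;

	ret = nvif_mmu_init(&device.object, NVIF_CLASS_MMU_GF100, &mmu);
	if (ret)
		return ret;

	/* Find a VRAM type that also supports compression tags. */
	for (i = 0; i < mmu.type_nr; i++) {
		const u8 want = NVIF_MEM_VRAM | NVIF_MEM_COMP;
		if ((mmu.type[i].type & want) == want) {
			type = i;
			break;
		}
	}

	nvif_mmu_fini(&mmu);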
+ 59 - 28
drivers/gpu/drm/nouveau/nvif/object.c

@@ -166,46 +166,77 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
 }
 
 void
-nvif_object_unmap(struct nvif_object *object)
+nvif_object_unmap_handle(struct nvif_object *object)
+{
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_unmap unmap;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_UNMAP,
+	};
+
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
+		       u64 *handle, u64 *length)
 {
-	if (object->map.size) {
-		struct nvif_client *client = object->client;
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_unmap unmap;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_UNMAP,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_map_v0 map;
+	} *args;
+	u32 argn = sizeof(*args) + argc;
+	int ret, maptype;
+
+	if (!(args = kzalloc(argn, GFP_KERNEL)))
+		return -ENOMEM;
+	args->ioctl.type = NVIF_IOCTL_V0_MAP;
+	memcpy(args->map.data, argv, argc);
 
-		if (object->map.ptr) {
+	ret = nvif_object_ioctl(object, args, argn, NULL);
+	*handle = args->map.handle;
+	*length = args->map.length;
+	maptype = args->map.type;
+	kfree(args);
+	return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+	struct nvif_client *client = object->client;
+	if (object->map.ptr) {
+		if (object->map.size) {
 			client->driver->unmap(client, object->map.ptr,
 						      object->map.size);
-			object->map.ptr = NULL;
+			object->map.size = 0;
 		}
-
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		object->map.size = 0;
+		object->map.ptr = NULL;
+		nvif_object_unmap_handle(object);
 	}
 }
 
 int
-nvif_object_map(struct nvif_object *object)
+nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
 {
 	struct nvif_client *client = object->client;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_map_v0 map;
-	} args = {
-		.ioctl.type = NVIF_IOCTL_V0_MAP,
-	};
-	int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
-	if (ret == 0) {
-		object->map.size = args.map.length;
-		object->map.ptr = client->driver->map(client, args.map.handle,
-						      object->map.size);
-		if (ret = -ENOMEM, object->map.ptr)
+	u64 handle, length;
+	int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
+	if (ret >= 0) {
+		if (ret) {
+			object->map.ptr = client->driver->map(client,
+							      handle,
+							      length);
+			if (ret = -ENOMEM, object->map.ptr) {
+				object->map.size = length;
+				return 0;
+			}
+		} else {
+			object->map.ptr = (void *)(unsigned long)handle;
 			return 0;
-		nvif_object_unmap(object);
+		}
+		nvif_object_unmap_handle(object);
 	}
 	return ret;
 }

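The mapping path is now split in two: nvif_object_map_handle() performs the ioctl, and its positive return value says the handle is an IO (BAR) address that still needs mapping through the client driver, while zero means the kernel already returned a CPU-addressable pointer. nvif_object_map() above is exactly this dance; restated as a condensed sketch of the contract:

	u64 handle, length;
	void *ptr;
	int ret;

	ret = nvif_object_map_handle(&object, NULL, 0, &handle, &length);
	if (ret < 0)
		return ret;		/* ioctl failed */
	if (ret)			/* NVIF_IOCTL_MAP_V0_IO */
		ptr = client->driver->map(client, handle, length);
	else				/* NVIF_IOCTL_MAP_V0_VA */
		ptr = (void *)(unsigned long)handle;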
+ 167 - 0
drivers/gpu/drm/nouveau/nvif/vmm.c

@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/if000c.h>
+
+int
+nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
+{
+	return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
+				&(struct nvif_vmm_unmap_v0) { .addr = addr },
+				sizeof(struct nvif_vmm_unmap_v0));
+}
+
+int
+nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
+	     struct nvif_mem *mem, u64 offset)
+{
+	struct nvif_vmm_map_v0 *args;
+	u8 stack[16];
+	int ret;
+
+	if (sizeof(*args) + argc > sizeof(stack)) {
+		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+			return -ENOMEM;
+	} else {
+		args = (void *)stack;
+	}
+
+	args->version = 0;
+	args->addr = addr;
+	args->size = size;
+	args->memory = nvif_handle(&mem->object);
+	args->offset = offset;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
+			       args, sizeof(*args) + argc);
+	if (args != (void *)stack)
+		kfree(args);
+	return ret;
+}
+
+void
+nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+	if (vma->size) {
+		WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
+					 &(struct nvif_vmm_put_v0) {
+						.addr = vma->addr,
+					 }, sizeof(struct nvif_vmm_put_v0)));
+		vma->size = 0;
+	}
+}
+
+int
+nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
+	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
+{
+	struct nvif_vmm_get_v0 args;
+	int ret;
+
+	args.version = vma->size = 0;
+	args.sparse = sparse;
+	args.page = page;
+	args.align = align;
+	args.size = size;
+
+	switch (type) {
+	case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
+	case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
+	case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
+			       &args, sizeof(args));
+	if (ret == 0) {
+		vma->addr = args.addr;
+		vma->size = args.size;
+	}
+	return ret;
+}
+
+void
+nvif_vmm_fini(struct nvif_vmm *vmm)
+{
+	kfree(vmm->page);
+	nvif_object_fini(&vmm->object);
+}
+
+int
+nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
+	      void *argv, u32 argc, struct nvif_vmm *vmm)
+{
+	struct nvif_vmm_v0 *args;
+	u32 argn = sizeof(*args) + argc;
+	int ret = -ENOSYS, i;
+
+	vmm->object.client = NULL;
+	vmm->page = NULL;
+
+	if (!(args = kmalloc(argn, GFP_KERNEL)))
+		return -ENOMEM;
+	args->version = 0;
+	args->addr = addr;
+	args->size = size;
+	memcpy(args->data, argv, argc);
+
+	ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
+			       &vmm->object);
+	if (ret)
+		goto done;
+
+	vmm->start = args->addr;
+	vmm->limit = args->size;
+
+	vmm->page_nr = args->page_nr;
+	vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+	if (!vmm->page) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < vmm->page_nr; i++) {
+		struct nvif_vmm_page_v0 args = { .index = i };
+
+		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
+				       &args, sizeof(args));
+		if (ret)
+			break;
+
+		vmm->page[i].shift = args.shift;
+		vmm->page[i].sparse = args.sparse;
+		vmm->page[i].vram = args.vram;
+		vmm->page[i].host = args.host;
+		vmm->page[i].comp = args.comp;
+	}
+
+done:
+	if (ret)
+		nvif_vmm_fini(vmm);
+	kfree(args);
+	return ret;
+}

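Together with nvif_mem, the vmm object gives clients an explicit get/map/unmap/put lifecycle for GPU virtual address space. A minimal sketch, assuming a vmm and a mem initialised as in the earlier examples; LAZY defers page-table allocation until map time (ADDR reserves address space only, PTES pre-allocates page tables for a fixed page size):

	struct nvif_vma vma;
	int ret;

	ret = nvif_vmm_get(&vmm, LAZY, false, mem.page, 0, mem.size, &vma);
	if (ret)
		return ret;

	ret = nvif_vmm_map(&vmm, vma.addr, mem.size, NULL, 0, &mem, 0);
	if (ret == 0)
		nvif_vmm_unmap(&vmm, vma.addr);
	nvif_vmm_put(&vmm, &vma);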
+ 2 - 0
drivers/gpu/drm/nouveau/nvkm/core/client.c

@@ -301,5 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
 	client->debug = nvkm_dbgopt(dbg, "CLIENT");
 	client->objroot = RB_ROOT;
 	client->ntfy = ntfy;
+	INIT_LIST_HEAD(&client->umem);
+	spin_lock_init(&client->lock);
 	return 0;
 }

+ 10 - 0
drivers/gpu/drm/nouveau/nvkm/core/engine.c

@@ -126,6 +126,15 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
+static int
+nvkm_engine_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->preinit)
+		engine->func->preinit(engine);
+	return 0;
+}
+
 static void *
 nvkm_engine_dtor(struct nvkm_subdev *subdev)
 {
@@ -138,6 +147,7 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_engine_func = {
 	.dtor = nvkm_engine_dtor,
+	.preinit = nvkm_engine_preinit,
 	.init = nvkm_engine_init,
 	.fini = nvkm_engine_fini,
 	.intr = nvkm_engine_intr,

+ 25 - 22
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c

@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 }
 
 /* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		     void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_heap_rd32,
 	.wr32 = nvkm_gpuobj_heap_wr32,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_heap = {
 	.acquire = nvkm_gpuobj_heap_acquire,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+			       vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32,
 	.wr32 = nvkm_gpuobj_wr32,
+	.map = nvkm_gpuobj_map,
 };
 
 static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_func = {
 	.acquire = nvkm_gpuobj_acquire,
+	.map = nvkm_gpuobj_map,
 };
 
 static int
@@ -185,7 +208,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
 		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
-	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
 }
 
 void
@@ -196,7 +219,7 @@ nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
 		if (gpuobj->parent)
 			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
 		nvkm_mm_fini(&gpuobj->heap);
-		nvkm_memory_del(&gpuobj->memory);
+		nvkm_memory_unref(&gpuobj->memory);
 		kfree(*pgpuobj);
 		*pgpuobj = NULL;
 	}
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
 	return ret;
 }
 
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_memory *memory = gpuobj->memory;
-	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret == 0)
-		nvkm_memory_map(memory, vma, 0);
-	return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
-}
-
 /* the below is basically only here to support sharing the paged dma object
  * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
  * anywhere else.

+ 11 - 4
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c

@@ -53,7 +53,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	struct nvkm_oclass oclass;
+	struct nvkm_oclass oclass = { .client = client };
 	int ret = -ENOSYS, i = 0;
 
 	nvif_ioctl(object, "sclass size %d\n", size);
@@ -257,13 +257,19 @@ nvkm_ioctl_map(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
+	enum nvkm_object_map type;
 	int ret = -ENOSYS;
 
 	nvif_ioctl(object, "map size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
 		nvif_ioctl(object, "map vers %d\n", args->v0.version);
-		ret = nvkm_object_map(object, &args->v0.handle,
-					      &args->v0.length);
+		ret = nvkm_object_map(object, data, size, &type,
+				      &args->v0.handle,
+				      &args->v0.length);
+		if (type == NVKM_OBJECT_MAP_IO)
+			args->v0.type = NVIF_IOCTL_MAP_V0_IO;
+		else
+			args->v0.type = NVIF_IOCTL_MAP_V0_VA;
 	}
 
 	return ret;
@@ -281,6 +287,7 @@ nvkm_ioctl_unmap(struct nvkm_client *client,
 	nvif_ioctl(object, "unmap size %d\n", size);
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		nvif_ioctl(object, "unmap\n");
+		ret = nvkm_object_unmap(object);
 	}
 
 	return ret;

+ 94 - 5
drivers/gpu/drm/nouveau/nvkm/core/memory.c

@@ -22,27 +22,116 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include <core/memory.h>
+#include <core/mm.h>
+#include <subdev/fb.h>
 #include <subdev/instmem.h>
 
+void
+nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags = *ptags;
+	if (tags) {
+		mutex_lock(&fb->subdev.mutex);
+		if (refcount_dec_and_test(&tags->refcount)) {
+			nvkm_mm_free(&fb->tags, &tags->mn);
+			kfree(memory->tags);
+			memory->tags = NULL;
+		}
+		mutex_unlock(&fb->subdev.mutex);
+		*ptags = NULL;
+	}
+}
+
+int
+nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+		     u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags;
+
+	mutex_lock(&fb->subdev.mutex);
+	if ((tags = memory->tags)) {
+		/* If comptags exist for the memory, but a different amount
+		 * than requested, the buffer is being mapped with settings
+		 * that are incompatible with existing mappings.
+		 */
+		if (tags->mn && tags->mn->length != nr) {
+			mutex_unlock(&fb->subdev.mutex);
+			return -EINVAL;
+		}
+
+		refcount_inc(&tags->refcount);
+		*ptags = tags;
+		return 0;
+	}
+
+	if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
+		mutex_unlock(&fb->subdev.mutex);
+		return -ENOMEM;
+	}
+
+	if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+		if (clr)
+			clr(device, tags->mn->offset, tags->mn->length);
+	} else {
+		/* Failure to allocate HW comptags is not an error, the
+		 * caller should fall back to an uncompressed map.
+		 *
+		 * As memory can be mapped in multiple places, we still
+		 * need to track the allocation failure and ensure that
+		 * any additional mappings remain uncompressed.
+		 *
+		 * This is handled by returning an empty nvkm_tags.
+		 */
+		tags->mn = NULL;
+	}
+
+	refcount_set(&tags->refcount, 1);
+	mutex_unlock(&fb->subdev.mutex);
+	*ptags = tags;
+	return 0;
+}
+
 void
 nvkm_memory_ctor(const struct nvkm_memory_func *func,
 		 struct nvkm_memory *memory)
 {
 	memory->func = func;
+	kref_init(&memory->kref);
+}
+
+static void
+nvkm_memory_del(struct kref *kref)
+{
+	struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+	if (!WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			memory = memory->func->dtor(memory);
+		kfree(memory);
+	}
 }
 }
 
 void
+nvkm_memory_unref(struct nvkm_memory **pmemory)
 {
 {
 	struct nvkm_memory *memory = *pmemory;
-		if (memory->func->dtor)
-			*pmemory = memory->func->dtor(memory);
-		kfree(*pmemory);
+	if (memory) {
+		kref_put(&memory->kref, nvkm_memory_del);
 		*pmemory = NULL;
 		*pmemory = NULL;
 	}
 }
 
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+	if (memory)
+		kref_get(&memory->kref);
+	return memory;
+}
+
 int
 nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
 		u64 size, u32 align, bool zero,

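Compression tags are now reference-counted on the memory object itself rather than tracked by the VM layer, so multiple mappings of one allocation share a single tag range. A short usage sketch derived from the comments above; the tag count nr and the programming step are placeholders:

	struct nvkm_tags *tags = NULL;

	if (nvkm_memory_tags_get(memory, device, nr, NULL, &tags) == 0) {
		if (tags->mn) {
			/* tags->mn->offset for tags->mn->length lines is
			 * ours; program the compression state here.
			 */
		} else {
			/* benign allocation failure: map uncompressed */
		}
		nvkm_memory_tags_put(memory, device, &tags);
	}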
+ 3 - 2
drivers/gpu/drm/nouveau/nvkm/core/mm.c

@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 }
 
 int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
 {
 	struct nvkm_mm_node *node, *prev;
 	u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
 
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	node->heap = ++mm->heap_nodes;
+	node->heap = heap;
+	mm->heap_nodes++;
 	return 0;
 }
 

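nvkm_mm_init() now takes the heap identifier from the caller instead of numbering heaps by order of initialisation, so several ranges can be registered under one heap id. A sketch under that assumption; heap id 1 is an arbitrary example value, and the updated callers above pass 0 where no distinguished heap is wanted:

	struct nvkm_mm mm;
	int ret;

	ret = nvkm_mm_init(&mm, 1 /* heap */, 0 /* offset */,
			   length, 1 /* block size */);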
+ 12 - 2
drivers/gpu/drm/nouveau/nvkm/core/object.c

@@ -102,10 +102,19 @@ nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 int
-nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	if (likely(object->func->map))
-		return object->func->map(object, addr, size);
+		return object->func->map(object, argv, argc, type, addr, size);
+	return -ENODEV;
+}
+
+int
+nvkm_object_unmap(struct nvkm_object *object)
+{
+	if (likely(object->func->unmap))
+		return object->func->unmap(object);
 	return -ENODEV;
 }
 
@@ -259,6 +268,7 @@ nvkm_object_dtor(struct nvkm_object *object)
 	}
 
 	nvif_debug(object, "destroy running...\n");
+	nvkm_object_unmap(object);
 	if (object->func->dtor)
 		data = object->func->dtor(object);
 	nvkm_engine_unref(&object->engine);

+ 11 - 2
drivers/gpu/drm/nouveau/nvkm/core/oproxy.c

@@ -37,9 +37,17 @@ nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 static int
-nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
-	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
+}
+
+static int
+nvkm_oproxy_unmap(struct nvkm_object *object)
+{
+	return nvkm_object_unmap(nvkm_oproxy(object)->object);
 }
 
 static int
@@ -171,6 +179,7 @@ nvkm_oproxy_func = {
 	.mthd = nvkm_oproxy_mthd,
 	.ntfy = nvkm_oproxy_ntfy,
 	.map = nvkm_oproxy_map,
+	.unmap = nvkm_oproxy_unmap,
 	.rd08 = nvkm_oproxy_rd08,
 	.rd16 = nvkm_oproxy_rd16,
 	.rd32 = nvkm_oproxy_rd32,

+ 1 - 0
drivers/gpu/drm/nouveau/nvkm/core/ramht.c

@@ -21,6 +21,7 @@
  */
 #include <core/ramht.h>
 #include <core/engine.h>
+#include <core/object.h>
 
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)

+ 52 - 46
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c

@@ -927,7 +927,7 @@ nv84_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
 	.therm = g84_therm_new,
@@ -959,7 +959,7 @@ nv86_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
 	.therm = g84_therm_new,
@@ -991,7 +991,7 @@ nv92_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g92_pci_new,
 	.therm = g84_therm_new,
@@ -1023,7 +1023,7 @@ nv94_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1055,7 +1055,7 @@ nv96_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1087,7 +1087,7 @@ nv98_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1119,7 +1119,7 @@ nva0_chipset = {
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g84_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1151,7 +1151,7 @@ nva3_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1185,7 +1185,7 @@ nva5_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1218,7 +1218,7 @@ nva8_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = g98_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.therm = g84_therm_new,
@@ -1315,7 +1315,7 @@ nvaf_chipset = {
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
 	.mc = gt215_mc_new,
-	.mmu = nv50_mmu_new,
+	.mmu = g84_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
 	.pmu = gt215_pmu_new,
@@ -1678,7 +1678,7 @@ nve4_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1717,7 +1717,7 @@ nve6_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1756,7 +1756,7 @@ nve7_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
@@ -1790,7 +1790,7 @@ nvea_chipset = {
 	.imem = gk20a_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk20a_mmu_new,
 	.pmu = gk20a_pmu_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
@@ -1820,7 +1820,7 @@ nvf0_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
@@ -1858,7 +1858,7 @@ nvf1_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk104_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
@@ -1896,7 +1896,7 @@ nv106_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk208_pmu_new,
@@ -1934,7 +1934,7 @@ nv108_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk208_pmu_new,
@@ -1958,7 +1958,7 @@ nv108_chipset = {
 static const struct nvkm_device_chip
 nv117_chipset = {
 	.name = "GM107",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.clk = gk104_clk_new,
@@ -1972,7 +1972,7 @@ nv117_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -1992,7 +1992,7 @@ nv117_chipset = {
 static const struct nvkm_device_chip
 nv118_chipset = {
 	.name = "GM108",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.clk = gk104_clk_new,
@@ -2006,7 +2006,7 @@ nv118_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gk104_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2026,7 +2026,7 @@ nv118_chipset = {
 static const struct nvkm_device_chip
 nv120_chipset = {
 	.name = "GM200",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2039,7 +2039,7 @@ nv120_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2061,7 +2061,7 @@ nv120_chipset = {
 static const struct nvkm_device_chip
 nv124_chipset = {
 	.name = "GM204",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2074,7 +2074,7 @@ nv124_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2096,7 +2096,7 @@ nv124_chipset = {
 static const struct nvkm_device_chip
 nv126_chipset = {
 	.name = "GM206",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2109,7 +2109,7 @@ nv126_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm200_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
@@ -2131,7 +2131,7 @@ nv126_chipset = {
 static const struct nvkm_device_chip
 nv12b_chipset = {
 	.name = "GM20B",
-	.bar = gk20a_bar_new,
+	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
 	.clk = gm20b_clk_new,
 	.fb = gm20b_fb_new,
@@ -2140,7 +2140,7 @@ nv12b_chipset = {
 	.imem = gk20a_instmem_new,
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gm20b_mmu_new,
 	.pmu = gm20b_pmu_new,
 	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
@@ -2156,7 +2156,7 @@ nv12b_chipset = {
 static const struct nvkm_device_chip
 nv130_chipset = {
 	.name = "GP100",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2168,7 +2168,8 @@ nv130_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gm200_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp100_pmu_new,
@@ -2190,7 +2191,7 @@ nv130_chipset = {
 static const struct nvkm_device_chip
 nv132_chipset = {
 	.name = "GP102",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2202,7 +2203,8 @@ nv132_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2224,7 +2226,7 @@ nv132_chipset = {
 static const struct nvkm_device_chip
 nv134_chipset = {
 	.name = "GP104",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2236,7 +2238,8 @@ nv134_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2258,7 +2261,7 @@ nv134_chipset = {
 static const struct nvkm_device_chip
 nv136_chipset = {
 	.name = "GP106",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2270,7 +2273,8 @@ nv136_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2292,7 +2296,7 @@ nv136_chipset = {
 static const struct nvkm_device_chip
 nv137_chipset = {
 	.name = "GP107",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2304,7 +2308,8 @@ nv137_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.secboot = gp102_secboot_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
@@ -2326,7 +2331,7 @@ nv137_chipset = {
 static const struct nvkm_device_chip
 nv138_chipset = {
 	.name = "GP108",
-	.bar = gf100_bar_new,
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
@@ -2338,7 +2343,8 @@ nv138_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp100_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp100_mmu_new,
+	.therm = gp100_therm_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
@@ -2355,7 +2361,7 @@ nv138_chipset = {
 static const struct nvkm_device_chip
 nv13b_chipset = {
 	.name = "GP10B",
-	.bar = gk20a_bar_new,
+	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
 	.fb = gp10b_fb_new,
 	.fuse = gm107_fuse_new,

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h

@@ -1,7 +1,7 @@
 #ifndef __NVKM_DEVICE_CTRL_H__
 #define __NVKM_DEVICE_CTRL_H__
 #define nvkm_control(p) container_of((p), struct nvkm_control, object)
-#include <core/device.h>
+#include <core/object.h>
 
 struct nvkm_control {
 	struct nvkm_object object;

+ 12 - 12
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c

@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	const struct nvkm_device_pci_vendor *pciv;
 	const char *name = NULL;
 	struct nvkm_device_pci *pdev;
-	int ret;
+	int ret, bits;
 
 	ret = pci_enable_device(pci_dev);
 	if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
 	if (ret)
 		return ret;
 
-	/*
-	 * Set a preliminary DMA mask based on the .dma_bits member of the
-	 * MMU subdevice. This allows other subdevices to create DMA mappings
-	 * in their init() or oneinit() methods, which may be called before the
-	 * TTM layer sets the DMA mask definitively.
-	 * This is necessary for platforms where the default DMA mask of 32
-	 * does not cover any system memory, i.e., when all RAM is > 4 GB.
-	 */
-	if (pdev->device.mmu)
-		dma_set_mask_and_coherent(&pci_dev->dev,
-				DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+	/* Set DMA mask based on capabilities reported by the MMU subdev. */
+	if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+		bits = pdev->device.mmu->dma_bits;
+	else
+		bits = 32;
+
+	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+	if (ret && bits != 32) {
+		dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+		pdev->device.mmu->dma_bits = 32;
+	}
 
 	return 0;
 }

+ 2 - 4
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c

@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 		if (ret)
 			goto free_domain;
 
-		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
 				   (1ULL << tdev->func->iommu_bit) >>
 				   tdev->iommu.pgshift, 1);
 		if (ret)
@@ -216,7 +216,7 @@ nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
 	if (tdev->irq) {
 		free_irq(tdev->irq, tdev);
 		tdev->irq = 0;
-	};
+	}
 }
 
 static int
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 
 	/**
 	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
-	 * This will be refined in nouveau_ttm_init but we need to do it early
-	 * for instmem to behave properly
 	 */
 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
 	if (ret)

+ 8 - 1
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c

@@ -206,10 +206,12 @@ nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
 }
 
 static int
-nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
+		 enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nvkm_udevice *udev = nvkm_udevice(object);
 	struct nvkm_device *device = udev->device;
+	*type = NVKM_OBJECT_MAP_IO;
 	*addr = device->func->resource_addr(device, 0);
 	*size = device->func->resource_size(device, 0);
 	return 0;
@@ -292,6 +294,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
 	if (!sclass) {
 		switch (index) {
 		case 0: sclass = &nvkm_control_oclass; break;
+		case 1:
+			if (!device->mmu)
+				return -EINVAL;
+			sclass = &device->mmu->user;
+			break;
 		default:
 			return -EINVAL;
 		}

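The widened map() hook above now tells callers how the returned range should be mapped instead of letting them assume MMIO. A hedged caller-side sketch, using only the hook signature visible in this diff (the helper itself is illustrative, not the in-tree ioctl path):

	/* Illustrative caller: only treat the result as MMIO when the
	 * object says so via *type. */
	static int example_map(struct nvkm_object *object, void *argv, u32 argc)
	{
		enum nvkm_object_map type;
		u64 addr, size;
		int ret = object->func->map(object, argv, argc, &type, &addr, &size);
		if (ret)
			return ret;
		if (type == NVKM_OBJECT_MAP_IO) {
			/* addr/size describe a BAR range; ioremap() it. */
		}
		return 0;
	}
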
+ 3 - 1
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c

@@ -191,11 +191,13 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
 }
 
 static int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+		   enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*type = NVKM_OBJECT_MAP_IO;
 	*addr = device->func->resource_addr(device, 0) +
 		0x640000 + (chan->chid.user * 0x1000);
 	*size = 0x001000;

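As a worked example of the channel-window math above: each display channel owns a 0x1000-byte slice of BAR0 starting at offset 0x640000, so user channel 3 maps at BAR0 base + 0x640000 + 3 * 0x1000 = BAR0 base + 0x643000, and *size is always one page (0x1000).
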
+ 1 - 0
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h

@@ -1,6 +1,7 @@
 #ifndef __NV50_DISP_CHAN_H__
 #define __NV50_DISP_CHAN_H__
 #define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include <core/object.h>
 #include "nv50.h"
 #include "nv50.h"
 
 
 struct nv50_disp_chan {
 struct nv50_disp_chan {

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h

@@ -147,7 +147,7 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
 
 #define IOR_MSG(i,l,f,a...) do {                                               \
 	struct nvkm_ior *_ior = (i);                                           \
-	nvkm_##l(&_ior->disp->engine.subdev, "%s: "f, _ior->name, ##a);        \
+	nvkm_##l(&_ior->disp->engine.subdev, "%s: "f"\n", _ior->name, ##a);    \
 } while(0)
 #define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
 #define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)

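The ior.h fix leans on C's adjacent-string-literal concatenation: "%s: " f "\n" is pasted into one format string at compile time, so the newline is appended centrally and callers must no longer supply their own. A self-contained illustration (the macro below is hypothetical, not the in-tree IOR_MSG):

	#include <stdio.h>

	/* "%s: ", f and "\n" merge into a single format string. ##a is the
	 * GNU variadic extension the kernel macro also relies on. */
	#define EXAMPLE_MSG(f, a...) printf("%s: " f "\n", "sor0", ##a)

	int main(void)
	{
		EXAMPLE_MSG("link training failed (%d)", -5);
		/* prints: sor0: link training failed (-5) */
		return 0;
	}
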
+ 3 - 3
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c

@@ -26,7 +26,7 @@
 
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
+#include <subdev/mmu/vmm.h>
 
 #include <nvif/class.h>
 
@@ -49,8 +49,8 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
 	int ret;
 
 	if (dmaobj->clone) {
-		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
-		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+		struct nvkm_memory *pgt =
+			device->mmu->vmm->pd->pt[0]->memory;
 		if (!dmaobj->base.start)
 			return nvkm_gpuobj_wrap(pgt, pgpuobj);
 		nvkm_kmap(pgt);

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c

@@ -99,7 +99,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 	const u32 base = falcon->addr;
 
 	if (!suspend) {
-		nvkm_memory_del(&falcon->core);
+		nvkm_memory_unref(&falcon->core);
 		if (falcon->external) {
 			vfree(falcon->data.data);
 			vfree(falcon->code.data);

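The falcon.c rename reflects the series' switch to reference-counted memory objects: a caller drops its reference rather than destroying the allocation, and the backing store is freed only when the last holder lets go. A hedged sketch, assuming the unref-and-NULL semantics implied by the pointer-to-pointer argument above:

	/* Sketch, not a definitive description of the nvkm API. */
	static void example_release(struct nvkm_memory **pmemory)
	{
		/* Drops one reference and clears *pmemory; the memory itself
		 * survives while other holders still reference it. */
		nvkm_memory_unref(pmemory);
	}
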
+ 8 - 0
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c

@@ -27,6 +27,7 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <core/notify.h>
+#include <subdev/mc.h>
 
 #include <nvif/event.h>
 #include <nvif/unpack.h>
@@ -278,6 +279,12 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
 	return 0;
 }
 
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
+	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+}
+
 static int
 nvkm_fifo_init(struct nvkm_engine *engine)
 {
@@ -302,6 +309,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_fifo = {
 	.dtor = nvkm_fifo_dtor,
+	.preinit = nvkm_fifo_preinit,
 	.oneinit = nvkm_fifo_oneinit,
 	.init = nvkm_fifo_init,
 	.fini = nvkm_fifo_fini,

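The fifo/base.c hunk introduces a preinit stage that resets the engine through the MC subdev before any init code runs, presumably so initialization starts from quiescent hardware. Other engines could opt into the same pattern by wiring a hook into their nvkm_engine_func; the GR variant below is a hypothetical illustration, not in-tree code:

	#include <subdev/mc.h>

	/* Hypothetical: quiesce GR the way FIFO is quiesced above. */
	static void example_gr_preinit(struct nvkm_engine *engine)
	{
		nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_GR);
	}

	static const struct nvkm_engine_func example_gr = {
		.preinit = example_gr_preinit,
		/* .dtor, .oneinit, .init, .fini as usual */
	};
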
Some files were not shown because too many files changed in this diff