
Merge tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next

Another pull request already, to get the s/fence/dma_fence/ rename in and
allow everyone to resync. Otherwise it's really just misc stuff all over,
plus a new bridge driver.

* tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/bridge: fix platform_no_drv_owner.cocci warnings
  drm/bridge: fix semicolon.cocci warnings
  drm: Print some debug/error info during DP dual mode detect
  drm: mark drm_of_component_match_add dummy inline
  drm/bridge: add Silicon Image SiI8620 driver
  dt-bindings: add Silicon Image SiI8620 bridge bindings
  video: add header file for Mobile High-Definition Link (MHL) interface
  drm: convert DT component matching to component_match_add_release()
  dma-buf: Rename struct fence to dma_fence
  dma-buf/fence: add a lockdep_assert_held()
  drm/dp: Factor out helper to distinguish between branch and sink devices
  drm/edid: Only print the bad edid when aborting
  drm/msm: add missing header dependencies
  drm/msm/adreno: move function declarations to header file
  drm/i2c/tda998x: mark symbol static where possible
  doc: add missing docbook parameter for fence-array
  drm: RIP mode_config->rotation_property
  drm/msm/mdp5: Advertise 180 degree rotation
  drm/msm/mdp5: Use per-plane rotation property
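
The rename itself is mechanical: struct fence becomes struct dma_fence and
every fence_* helper gains a dma_ prefix. As a rough before/after sketch
(illustrative driver code, not taken from this tree):

	/* before the rename */
	#include <linux/fence.h>

	static void drop_ref_old(struct fence *f)
	{
		fence_put(f);		/* drop a reference */
	}

	/* after the rename */
	#include <linux/dma-fence.h>

	static void drop_ref_new(struct dma_fence *f)
	{
		dma_fence_put(f);	/* drop a reference */
	}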
Dave Airlie
commit 220196b384
100 changed files with 4099 additions and 896 deletions
  +33    -0    Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
  +7     -7    Documentation/sync_file.txt
  +3     -3    drivers/base/Kconfig
  +1     -1    drivers/dma-buf/Kconfig
  +1     -1    drivers/dma-buf/Makefile
  +14    -14   drivers/dma-buf/dma-buf.c
  +46    -45   drivers/dma-buf/dma-fence-array.c
  +103   -98   drivers/dma-buf/dma-fence.c
  +48    -46   drivers/dma-buf/reservation.c
  +9     -9    drivers/dma-buf/seqno-fence.c
  +24    -24   drivers/dma-buf/sw_sync.c
  +7     -6    drivers/dma-buf/sync_debug.c
  +4     -5    drivers/dma-buf/sync_debug.h
  +33    -30   drivers/dma-buf/sync_file.c
  +16    -16   drivers/gpu/drm/amd/amdgpu/amdgpu.h
  +4     -4    drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
  +8     -8    drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
  +11    -11   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
  +7     -7    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
  +8     -8    drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
  +29    -29   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
  +11    -11   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
  +7     -7    drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
  +4     -4    drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
  +2     -2    drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
  +12    -12   drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
  +25    -23   drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
  +5     -5    drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
  +6     -6    drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
  +2     -2    drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
  +9     -9    drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
  +2     -2    drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
  +13    -13   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
  +2     -2    drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
  +13    -13   drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
  +2     -2    drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
  +40    -39   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
  +5     -5    drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
  +3     -3    drivers/gpu/drm/amd/amdgpu/cik_sdma.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
  +6     -6    drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
  +3     -3    drivers/gpu/drm/amd/amdgpu/si_dma.c
  +2     -2    drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
  +34    -33   drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
  +13    -13   drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
  +26    -22   drivers/gpu/drm/amd/scheduler/sched_fence.c
  +2     -1    drivers/gpu/drm/arm/hdlcd_drv.c
  +3     -1    drivers/gpu/drm/arm/malidp_drv.c
  +1     -1    drivers/gpu/drm/armada/armada_drv.c
  +7     -0    drivers/gpu/drm/bridge/Kconfig
  +1     -0    drivers/gpu/drm/bridge/Makefile
  +1564  -0    drivers/gpu/drm/bridge/sil-sii8620.c
  +1517  -0    drivers/gpu/drm/bridge/sil-sii8620.h
  +3     -5    drivers/gpu/drm/drm_atomic.c
  +4     -4    drivers/gpu/drm/drm_atomic_helper.c
  +4     -28   drivers/gpu/drm/drm_blend.c
  +18    -0    drivers/gpu/drm/drm_dp_dual_mode_helper.c
  +56    -23   drivers/gpu/drm/drm_edid.c
  +1     -6    drivers/gpu/drm/drm_fb_helper.c
  +3     -3    drivers/gpu/drm/drm_fops.c
  +26    -2    drivers/gpu/drm/drm_of.c
  +3     -2    drivers/gpu/drm/etnaviv/etnaviv_drv.c
  +3     -3    drivers/gpu/drm/etnaviv/etnaviv_gem.c
  +23    -23   drivers/gpu/drm/etnaviv/etnaviv_gpu.c
  +2     -2    drivers/gpu/drm/etnaviv/etnaviv_gpu.h
  +4     -3    drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
  +2     -1    drivers/gpu/drm/i2c/tda998x_drv.c
  +16    -16   drivers/gpu/drm/i915/i915_gem_request.c
  +9     -9    drivers/gpu/drm/i915/i915_gem_request.h
  +21    -20   drivers/gpu/drm/i915/i915_sw_fence.c
  +4     -4    drivers/gpu/drm/i915/i915_sw_fence.h
  +1     -1    drivers/gpu/drm/i915/i915_trace.h
  +2     -2    drivers/gpu/drm/i915/intel_breadcrumbs.c
  +4     -7    drivers/gpu/drm/i915/intel_dp.c
  +1     -1    drivers/gpu/drm/i915/intel_engine_cs.c
  +3     -1    drivers/gpu/drm/mediatek/mtk_drm_drv.c
  +0     -3    drivers/gpu/drm/msm/adreno/adreno_device.c
  +3     -0    drivers/gpu/drm/msm/adreno/adreno_gpu.h
  +22    -13   drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
  +1     -0    drivers/gpu/drm/msm/msm_debugfs.c
  +7     -5    drivers/gpu/drm/msm/msm_drv.c
  +1     -1    drivers/gpu/drm/msm/msm_drv.h
  +14    -14   drivers/gpu/drm/msm/msm_fence.c
  +1     -1    drivers/gpu/drm/msm/msm_fence.h
  +7     -7    drivers/gpu/drm/msm/msm_gem.c
  +1     -1    drivers/gpu/drm/msm/msm_gem.h
  +4     -4    drivers/gpu/drm/msm/msm_gem_submit.c
  +1     -1    drivers/gpu/drm/msm/msm_gpu.c
  +3     -3    drivers/gpu/drm/nouveau/nouveau_bo.c
  +40    -40   drivers/gpu/drm/nouveau/nouveau_fence.c
  +3     -3    drivers/gpu/drm/nouveau/nouveau_fence.h
  +1     -1    drivers/gpu/drm/nouveau/nouveau_gem.c
  +1     -1    drivers/gpu/drm/nouveau/nv04_fence.c
  +1     -1    drivers/gpu/drm/nouveau/nv10_fence.c
  +1     -1    drivers/gpu/drm/nouveau/nv17_fence.c
  +1     -1    drivers/gpu/drm/nouveau/nv50_fence.c

+ 33 - 0
Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt

@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+	- compatible: "sil,sii8620"
+	- reg: i2c address of the bridge
+	- cvcc10-supply: Digital Core Supply Voltage (1.0V)
+	- iovcc18-supply: I/O Supply Voltage (1.8V)
+	- interrupts, interrupt-parent: interrupt specifier of INT pin
+	- reset-gpios: gpio specifier of RESET pin
+	- clocks, clock-names: specification and name of "xtal" clock
+	- video interfaces: Device node can contain video interface port
+			    node for HDMI encoder according to [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+	sii8620@39 {
+		reg = <0x39>;
+		compatible = "sil,sii8620";
+		cvcc10-supply = <&ldo36_reg>;
+		iovcc18-supply = <&ldo34_reg>;
+		interrupt-parent = <&gpf0>;
+		interrupts = <2 0>;
+		reset-gpios = <&gpv7 0 0>;
+		clocks = <&pmu_system_controller 0>;
+		clock-names = "xtal";
+
+		port {
+			mhl_to_hdmi: endpoint {
+				remote-endpoint = <&hdmi_to_mhl>;
+			};
+		};
+	};

+ 7 - 7
Documentation/sync_file.txt

@@ -6,7 +6,7 @@
 
 This document serves as a guide for device driver writers on what the
 sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences(struct dma_fence) that are needed to synchronize between drivers or
 across process boundaries.
 
 The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@ in-fences and out-fences
 Sync files can go either to or from userspace. When a sync_file is sent from
 the driver to userspace we call the fences it contains 'out-fences'. They are
 related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
 
 On the other hand if the driver receives fence(s) through a sync_file from
 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@@ -47,7 +47,7 @@ Creating Sync Files
 When a driver needs to send an out-fence to userspace it creates a sync_file.
 
 Interface:
-	struct sync_file *sync_file_create(struct fence *fence);
+	struct sync_file *sync_file_create(struct dma_fence *fence);
 
 The caller passes the out-fence and gets back the sync_file. That is just the
 first step, next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
 from it.
 
 Interface:
-	struct fence *sync_file_get_fence(int fd);
+	struct dma_fence *sync_file_get_fence(int fd);
 
 
 The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
 
 References:
 [1] struct sync_file in include/linux/sync_file.h

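A minimal sketch of the driver-side flow with the renamed API (the
get_unused_fd_flags()/fd_install() pattern for handing the fd to userspace is
assumed here; it is not part of this diff):

	struct sync_file *sync_file;
	int fd;

	sync_file = sync_file_create(fence);	/* holds its own fence ref */
	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);		/* drops the sync_file */
		return fd;
	}
	fd_install(fd, sync_file->file);	/* out-fence now visible as an fd */
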
+ 3 - 3
drivers/base/Kconfig

@@ -248,11 +248,11 @@ config DMA_SHARED_BUFFER
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
-config FENCE_TRACE
-	bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+	bool "Enable verbose DMA_FENCE_TRACE messages"
 	depends on DMA_SHARED_BUFFER
 	help
-	  Enable the FENCE_TRACE printks. This will add extra
+	  Enable the DMA_FENCE_TRACE printks. This will add extra
 	  spam to the console log, but will make it easier to diagnose
 	  lockup related problems for dma-buffers shared across multiple
 	  devices.

+ 1 - 1
drivers/dma-buf/Kconfig

@@ -7,7 +7,7 @@ config SYNC_FILE
 	select DMA_SHARED_BUFFER
 	---help---
 	  The Sync File Framework adds explicit synchronization via
-	  userspace. It enables send/receive 'struct fence' objects to/from
+	  userspace. It enables send/receive 'struct dma_fence' objects to/from
 	  userspace via Sync File fds for synchronization between drivers via
 	  userspace components. It has been ported from Android.
 

+ 1 - 1
drivers/dma-buf/Makefile

@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o

+ 14 - 14
drivers/dma-buf/dma-buf.c

@@ -25,7 +25,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 	return base + offset;
 }
 
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 	unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_buf *dmabuf;
 	struct reservation_object *resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence_excl;
+	struct dma_fence *fence_excl;
 	unsigned long events;
 	unsigned shared_count, seq;
 
@@ -187,20 +187,20 @@ retry:
 		spin_unlock_irq(&dmabuf->poll.lock);
 
 		if (events & pevents) {
-			if (!fence_get_rcu(fence_excl)) {
+			if (!dma_fence_get_rcu(fence_excl)) {
 				/* force a recheck */
 				events &= ~pevents;
 				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!fence_add_callback(fence_excl, &dcb->cb,
-						       dma_buf_poll_cb)) {
+			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+							   dma_buf_poll_cb)) {
 				events &= ~pevents;
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 			} else {
 				/*
 				 * No callback queued, wake up any additional
 				 * waiters.
 				 */
-				fence_put(fence_excl);
+				dma_fence_put(fence_excl);
 				dma_buf_poll_cb(NULL, &dcb->cb);
 			}
 		}
@@ -222,9 +222,9 @@ retry:
 			goto out;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			if (!fence_get_rcu(fence)) {
+			if (!dma_fence_get_rcu(fence)) {
 				/*
 				 * fence refcount dropped to zero, this means
 				 * that fobj has been freed
@@ -235,13 +235,13 @@ retry:
 				dma_buf_poll_cb(NULL, &dcb->cb);
 				break;
 			}
-			if (!fence_add_callback(fence, &dcb->cb,
-						dma_buf_poll_cb)) {
-				fence_put(fence);
+			if (!dma_fence_add_callback(fence, &dcb->cb,
+						    dma_buf_poll_cb)) {
+				dma_fence_put(fence);
 				events &= ~POLLOUT;
 				break;
 			}
-			fence_put(fence);
+			dma_fence_put(fence);
 		}
 
 		/* No callback queued, wake up any additional waiters. */

+ 46 - 45
drivers/dma-buf/fence-array.c → drivers/dma-buf/dma-fence-array.c

@@ -1,5 +1,5 @@
 /*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
  *
  * Copyright (C) 2016 Collabora Ltd
  * Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
 
 #include <linux/export.h>
 #include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
 
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
-	return "fence_array";
+	return "dma_fence_array";
 }
 
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 {
 	return "unbound";
 }
 
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
-	struct fence_array_cb *array_cb =
-		container_of(cb, struct fence_array_cb, cb);
-	struct fence_array *array = array_cb->array;
+	struct dma_fence_array_cb *array_cb =
+		container_of(cb, struct dma_fence_array_cb, cb);
+	struct dma_fence_array *array = array_cb->array;
 
 	if (atomic_dec_and_test(&array->num_pending))
-		fence_signal(&array->base);
-	fence_put(&array->base);
+		dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
 }
 
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
-	struct fence_array_cb *cb = (void *)(&array[1]);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	struct dma_fence_array_cb *cb = (void *)(&array[1]);
 	unsigned i;
 
 	for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
 		 * until we signal the array as complete (but that is now
 		 * insufficient).
 		 */
-		fence_get(&array->base);
-		if (fence_add_callback(array->fences[i], &cb[i].cb,
-				       fence_array_cb_func)) {
-			fence_put(&array->base);
+		dma_fence_get(&array->base);
+		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+					   dma_fence_array_cb_func)) {
+			dma_fence_put(&array->base);
 			if (atomic_dec_and_test(&array->num_pending))
 				return false;
 		}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
 
 	return atomic_read(&array->num_pending) <= 0;
 }
 
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
 {
-	struct fence_array *array = to_fence_array(fence);
+	struct dma_fence_array *array = to_dma_fence_array(fence);
 	unsigned i;
 
 	for (i = 0; i < array->num_fences; ++i)
-		fence_put(array->fences[i]);
+		dma_fence_put(array->fences[i]);
 
 	kfree(array->fences);
-	fence_free(fence);
+	dma_fence_free(fence);
 }
 
-const struct fence_ops fence_array_ops = {
-	.get_driver_name = fence_array_get_driver_name,
-	.get_timeline_name = fence_array_get_timeline_name,
-	.enable_signaling = fence_array_enable_signaling,
-	.signaled = fence_array_signaled,
-	.wait = fence_default_wait,
-	.release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+	.get_driver_name = dma_fence_array_get_driver_name,
+	.get_timeline_name = dma_fence_array_get_timeline_name,
+	.enable_signaling = dma_fence_array_enable_signaling,
+	.signaled = dma_fence_array_signaled,
+	.wait = dma_fence_default_wait,
+	.release = dma_fence_array_release,
 };
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
 
 /**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
  * @num_fences:		[in]	number of fences to add in the array
  * @fences:		[in]	array containing the fences
  * @context:		[in]	fence context to use
  * @seqno:		[in]	sequence number to use
  * @signal_on_any:	[in]	signal on any fence in the array
  *
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
  * In case of error it returns NULL.
 *
 * The caller should allocate the fences array with num_fences size
 * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
-				       u64 context, unsigned seqno,
-				       bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
 {
-	struct fence_array *array;
+	struct dma_fence_array *array;
 	size_t size = sizeof(*array);
 
 	/* Allocate the callback structures behind the array. */
-	size += num_fences * sizeof(struct fence_array_cb);
+	size += num_fences * sizeof(struct dma_fence_array_cb);
 	array = kzalloc(size, GFP_KERNEL);
 	if (!array)
 		return NULL;
 
 	spin_lock_init(&array->lock);
-	fence_init(&array->base, &fence_array_ops, &array->lock,
-		   context, seqno);
+	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+		       context, seqno);
 
 	array->num_fences = num_fences;
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
 
 	return array;
 }
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(dma_fence_array_create);

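A minimal usage sketch for the renamed constructor ('wait until both a and b
signal'; a and b are illustrative fences, not names from this diff):

	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* ownership of fences[] and the references in it moves to the array */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal when all signal */);
	if (!array) {
		dma_fence_put(a);
		dma_fence_put(b);
		kfree(fences);
		return -ENOMEM;
	}
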
+ 103 - 98
drivers/dma-buf/fence.c → drivers/dma-buf/dma-fence.c

@@ -21,13 +21,13 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 
 /*
  * fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
 
 /**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
  * @num:	[in]	amount of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic64_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &dma_fence_context_counter) - num;
 }
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
 
 /**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
  * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
 */
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
 {
-	struct fence_cb *cur, *tmp;
+	struct dma_fence_cb *cur, *tmp;
 	int ret = 0;
 
+	lockdep_assert_held(fence->lock);
+
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
 		/*
-		 * we might have raced with the unlocked fence_signal,
+		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
 	} else
-		trace_fence_signaled(fence);
+		trace_dma_fence_signaled(fence);
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
 
 /**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
  * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
 
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
 		smp_mb__before_atomic();
 	}
 
-	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
-	trace_fence_signaled(fence);
+	trace_dma_fence_signaled(fence);
 
-	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct fence_cb *cur, *tmp;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct dma_fence_cb *cur, *tmp;
 
 		spin_lock_irqsave(fence->lock, flags);
 		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
 
 /**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
  * or until timeout elapses
  * @fence:	[in]	the fence to wait on
  * @intr:	[in]	if true, do an interruptible wait
@@ -152,7 +154,7 @@ EXPORT_SYMBOL(fence_signal);
 * freed before return, resulting in undefined behavior.
 */
 signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	signed long ret;
 
@@ -160,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
 		return -EINVAL;
 
 	if (timeout == 0)
-		return fence_is_signaled(fence);
+		return dma_fence_is_signaled(fence);
 
-	trace_fence_wait_start(fence);
+	trace_dma_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
-	trace_fence_wait_end(fence);
+	trace_dma_fence_wait_end(fence);
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
 
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
 {
-	struct fence *fence =
-			container_of(kref, struct fence, refcount);
+	struct dma_fence *fence =
+		container_of(kref, struct dma_fence, refcount);
 
-	trace_fence_destroy(fence);
+	trace_dma_fence_destroy(fence);
 
 	BUG_ON(!list_empty(&fence->cb_list));
 
 	if (fence->ops->release)
 		fence->ops->release(fence);
 	else
-		fence_free(fence);
+		dma_fence_free(fence);
 }
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
 
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
 {
 	kfree_rcu(fence, rcu);
 }
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
 
 /**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
  * @fence:	[in]	the fence to enable
 *
 * this will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible
 */
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
 	unsigned long flags;
 
-	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
-	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		trace_fence_enable_signal(fence);
+	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			      &fence->flags) &&
+	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		trace_dma_fence_enable_signal(fence);
 
 		spin_lock_irqsave(fence->lock, flags);
 
 		if (!fence->ops->enable_signaling(fence))
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 
 		spin_unlock_irqrestore(fence->lock, flags);
 	}
 }
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
 /**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
  * is signaled
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to register
  * @func:	[in]	the function to call
 *
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
@@ -232,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
 * from irq context.
 *
 */
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
-		       fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+			   dma_fence_func_t func)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -249,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 	if (WARN_ON(!fence || !func))
 		return -EINVAL;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		INIT_LIST_HEAD(&cb->node);
 		return -ENOENT;
 	}
 
 	spin_lock_irqsave(fence->lock, flags);
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		ret = -ENOENT;
 	else if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			ret = -ENOENT;
 		}
 	}
@@ -278,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
 
 /**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to remove
 *
@@ -296,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback);
 * with a reference held to the fence.
 */
 bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	unsigned long flags;
 	bool ret;
@@ -311,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
 
 struct default_wait_cb {
-	struct fence_cb base;
+	struct dma_fence_cb base;
 	struct task_struct *task;
 };
 
 static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct default_wait_cb *wait =
 		container_of(cb, struct default_wait_cb, base);
@@ -328,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 /**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
  * or until timeout elapses
  * @fence:	[in]	the fence to wait on
  * @intr:	[in]	if true, do an interruptible wait
@@ -338,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
 * remaining timeout in jiffies on success.
 */
 signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 {
 	struct default_wait_cb cb;
 	unsigned long flags;
 	signed long ret = timeout;
 	bool was_set;
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return timeout;
 
 	spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
 		goto out;
 	}
 
-	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+				   &fence->flags);
 
-	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		goto out;
 
 	if (!was_set) {
-		trace_fence_enable_signal(fence);
+		trace_dma_fence_enable_signal(fence);
 
 		if (!fence->ops->enable_signaling(fence)) {
-			fence_signal_locked(fence);
+			dma_fence_signal_locked(fence);
 			goto out;
 		}
 	}
 
-	cb.base.func = fence_default_wait_cb;
+	cb.base.func = dma_fence_default_wait_cb;
 	cb.task = current;
 	list_add(&cb.base.node, &fence->cb_list);
 
-	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
 		if (intr)
 			__set_current_state(TASK_INTERRUPTIBLE);
 		else
@@ -395,23 +400,23 @@ out:
 	spin_unlock_irqrestore(fence->lock, flags);
 	return ret;
 }
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
 
 static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
 {
 	int i;
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		struct dma_fence *fence = fences[i];
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			return true;
 	}
 	return false;
 }
 
 /**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
  * or until timeout elapses
  * @fences:	[in]	array of fences to wait on
  * @count:	[in]	number of fences to wait on
@@ -427,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
 * fence might be freed before return, resulting in undefined behavior.
 */
 signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
-		       bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+			   bool intr, signed long timeout)
 {
 	struct default_wait_cb *cb;
 	signed long ret = timeout;
@@ -439,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 	if (timeout == 0) {
 		for (i = 0; i < count; ++i)
-			if (fence_is_signaled(fences[i]))
+			if (dma_fence_is_signaled(fences[i]))
 				return 1;
 
 		return 0;
@@ -452,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 	}
 
 	for (i = 0; i < count; ++i) {
-		struct fence *fence = fences[i];
+		struct dma_fence *fence = fences[i];
 
-		if (fence->ops->wait != fence_default_wait) {
+		if (fence->ops->wait != dma_fence_default_wait) {
 			ret = -EINVAL;
 			goto fence_rm_cb;
 		}
 
 		cb[i].task = current;
-		if (fence_add_callback(fence, &cb[i].base,
-				       fence_default_wait_cb)) {
+		if (dma_fence_add_callback(fence, &cb[i].base,
+					   dma_fence_default_wait_cb)) {
 			/* This fence is already signaled */
 			goto fence_rm_cb;
 		}
@@ -473,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 		else
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (fence_test_signaled_any(fences, count))
+		if (dma_fence_test_signaled_any(fences, count))
 			break;
 
 		ret = schedule_timeout(ret);
@@ -486,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
 
 fence_rm_cb:
 	while (i-- > 0)
-		fence_remove_callback(fences[i], &cb[i].base);
+		dma_fence_remove_callback(fences[i], &cb[i].base);
 
 err_free_cb:
 	kfree(cb);
 
 	return ret;
 }
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
 
 /**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
  * @fence:	[in]	the fence to initialize
- * @ops:	[in]	the fence_ops for operations on this fence
+ * @ops:	[in]	the dma_fence_ops for operations on this fence
  * @lock:	[in]	the irqsafe spinlock to use for locking this fence
  * @context:	[in]	the execution context this fence is run on
  * @seqno:	[in]	a linear increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
 */
 void
-fence_init(struct fence *fence, const struct fence_ops *ops,
-	     spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+	       spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
 	fence->seqno = seqno;
 	fence->flags = 0UL;
 
-	trace_fence_init(fence);
+	trace_dma_fence_init(fence);
 }
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);

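A minimal sketch of the callback contract documented above (my_waiter/my_cb
are hypothetical names; dma_fence_add_callback() returns -ENOENT when the
fence is already signaled, in which case the callback is never installed and
the caller must handle completion itself):

	struct my_waiter {
		struct dma_fence_cb cb;
		struct completion done;
	};

	static void my_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct my_waiter *w = container_of(cb, struct my_waiter, cb);

		complete(&w->done);	/* may be called from irq context */
	}

	static void my_wait(struct dma_fence *fence)
	{
		struct my_waiter w;

		init_completion(&w.done);
		if (dma_fence_add_callback(fence, &w.cb, my_cb))
			complete(&w.done);	/* already signaled */
		wait_for_completion(&w.done);
	}
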
+ 48 - 46
drivers/dma-buf/reservation.c

@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
 static void
 reservation_object_add_shared_inplace(struct reservation_object *obj,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	u32 i;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < fobj->shared_count; ++i) {
-		struct fence *old_fence;
+		struct dma_fence *old_fence;
 
 		old_fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
 			write_seqcount_end(&obj->seq);
 			preempt_enable();
 
-			fence_put(old_fence);
+			dma_fence_put(old_fence);
 			return;
 		}
 	}
@@ -143,12 +143,12 @@ static void
 reservation_object_add_shared_replace(struct reservation_object *obj,
 				      struct reservation_object_list *old,
 				      struct reservation_object_list *fobj,
-				      struct fence *fence)
+				      struct dma_fence *fence)
 {
 	unsigned i;
-	struct fence *old_fence = NULL;
+	struct dma_fence *old_fence = NULL;
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	if (!old) {
 		RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
 	fobj->shared_count = old->shared_count;
 
 	for (i = 0; i < old->shared_count; ++i) {
-		struct fence *check;
+		struct dma_fence *check;
 
 		check = rcu_dereference_protected(old->shared[i],
 						reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
 		kfree_rcu(old, rcu);
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 
 /**
@@ -208,7 +208,7 @@ done:
 * reservation_object_reserve_shared() has been called.
 */
 void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct fence *fence)
+					 struct dma_fence *fence)
 {
 	struct reservation_object_list *old, *fobj = obj->staged;
 
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct fence *fence)
+				       struct dma_fence *fence)
 {
-	struct fence *old_fence = reservation_object_get_excl(obj);
+	struct dma_fence *old_fence = reservation_object_get_excl(obj);
 	struct reservation_object_list *old;
 	u32 i = 0;
 
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 		i = old->shared_count;
 
 	if (fence)
-		fence_get(fence);
+		dma_fence_get(fence);
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 
 	/* inplace update, no shared fences */
 	while (i--)
-		fence_put(rcu_dereference_protected(old->shared[i],
+		dma_fence_put(rcu_dereference_protected(old->shared[i],
 						reservation_object_held(obj)));
 
 	if (old_fence)
-		fence_put(old_fence);
+		dma_fence_put(old_fence);
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
@@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
 * Zero or -errno
 */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct fence **pfence_excl,
+				      struct dma_fence **pfence_excl,
 				      unsigned *pshared_count,
-				      struct fence ***pshared)
+				      struct dma_fence ***pshared)
 {
-	struct fence **shared = NULL;
-	struct fence *fence_excl;
+	struct dma_fence **shared = NULL;
+	struct dma_fence *fence_excl;
 	unsigned int shared_count;
 	int ret = 1;
 
@@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 		seq = read_seqcount_begin(&obj->seq);
 
 		fence_excl = rcu_dereference(obj->fence_excl);
-		if (fence_excl && !fence_get_rcu(fence_excl))
+		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
-			struct fence **nshared;
+			struct dma_fence **nshared;
 			size_t sz = sizeof(*shared) * fobj->shared_max;
 
 			nshared = krealloc(shared, sz,
@@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 
 			for (i = 0; i < shared_count; ++i) {
 				shared[i] = rcu_dereference(fobj->shared[i]);
-				if (!fence_get_rcu(shared[i]))
+				if (!dma_fence_get_rcu(shared[i]))
 					break;
 			}
 		}
 
 		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
 			while (i--)
-				fence_put(shared[i]);
-			fence_put(fence_excl);
+				dma_fence_put(shared[i]);
+			dma_fence_put(fence_excl);
 			goto unlock;
 		}
 
@@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned seq, shared_count, i = 0;
 	long ret = timeout;
 
@@ -389,16 +389,17 @@ retry:
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 
-			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &lfence->flags))
 				continue;
 
-			if (!fence_get_rcu(lfence))
+			if (!dma_fence_get_rcu(lfence))
 				goto unlock_retry;
 
-			if (fence_is_signaled(lfence)) {
-				fence_put(lfence);
+			if (dma_fence_is_signaled(lfence)) {
+				dma_fence_put(lfence);
 				continue;
 			}
 
@@ -408,15 +409,16 @@ retry:
 	}
 
 	if (!shared_count) {
-		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl &&
-		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
-			if (!fence_get_rcu(fence_excl))
+		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			      &fence_excl->flags)) {
+			if (!dma_fence_get_rcu(fence_excl))
 				goto unlock_retry;
 
-			if (fence_is_signaled(fence_excl))
-				fence_put(fence_excl);
+			if (dma_fence_is_signaled(fence_excl))
+				dma_fence_put(fence_excl);
 			else
 				fence = fence_excl;
 		}
@@ -425,12 +427,12 @@ retry:
 	rcu_read_unlock();
 	if (fence) {
 		if (read_seqcount_retry(&obj->seq, seq)) {
-			fence_put(fence);
+			dma_fence_put(fence);
 			goto retry;
 		}
 
-		ret = fence_wait_timeout(fence, intr, ret);
-		fence_put(fence);
+		ret = dma_fence_wait_timeout(fence, intr, ret);
+		dma_fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
 			goto retry;
 	}
@@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
 
 
 static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 {
-	struct fence *fence, *lfence = passed_fence;
+	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
 
-	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-		fence = fence_get_rcu(lfence);
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		fence = dma_fence_get_rcu(lfence);
 		if (!fence)
 			return -1;
 
-		ret = !!fence_is_signaled(fence);
-		fence_put(fence);
+		ret = !!dma_fence_is_signaled(fence);
+		dma_fence_put(fence);
 	}
 	return ret;
 }
@@ -492,7 +494,7 @@ retry:
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
@@ -506,7 +508,7 @@ retry:
 	}
 
 	if (!shared_count) {
-		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(

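The conversion above is mechanical: every fence_*() call gains a dma_ prefix and the RCU get/retry discipline stays the same. As a minimal sketch of the renamed reference-counting idiom (a hypothetical helper; the name wait_on_rcu_fence and the pointer fptr are illustrative, not part of this commit):

static long wait_on_rcu_fence(struct dma_fence __rcu **fptr, long timeout)
{
	struct dma_fence *fence;
	long ret = timeout;

	rcu_read_lock();
	fence = rcu_dereference(*fptr);
	/* dma_fence_get_rcu() fails if the refcount already dropped to zero */
	if (fence && !dma_fence_get_rcu(fence))
		fence = NULL;
	rcu_read_unlock();

	if (fence) {
		ret = dma_fence_wait_timeout(fence, false, timeout);
		dma_fence_put(fence);
	}
	return ret;
}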
+ 9 - 9
drivers/dma-buf/seqno-fence.c

@@ -21,35 +21,35 @@
 #include <linux/export.h>
 #include <linux/seqno-fence.h>
 
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->get_driver_name(fence);
 }
 
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->get_timeline_name(fence);
 }
 
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->enable_signaling(fence);
 }
 
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
 {
 	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
 
 	return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
 }
 
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
 {
 	struct seqno_fence *f = to_seqno_fence(fence);
 
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
 	if (f->ops->release)
 		f->ops->release(fence);
 	else
-		fence_free(&f->base);
+		dma_fence_free(&f->base);
 }
 
-static signed long seqno_wait(struct fence *fence, bool intr,
-				signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+			      signed long timeout)
 {
 	struct seqno_fence *f = to_seqno_fence(fence);
 
 	return f->ops->wait(fence, intr, timeout);
 }
 
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
 	.get_driver_name = seqno_fence_get_driver_name,
 	.get_timeline_name = seqno_fence_get_timeline_name,
 	.enable_signaling = seqno_enable_signaling,

+ 24 - 24
drivers/dma-buf/sw_sync.c

@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
 
 #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
 
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
 
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
 {
 	if (fence->ops != &timeline_fence_ops)
 		return NULL;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
 		return NULL;
 
 	kref_init(&obj->kref);
-	obj->context = fence_context_alloc(1);
+	obj->context = dma_fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
 	INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 
 	list_for_each_entry_safe(pt, next, &obj->active_list_head,
 				 active_list) {
-		if (fence_is_signaled_locked(&pt->base))
+		if (dma_fence_is_signaled_locked(&pt->base))
 			list_del_init(&pt->active_list);
 	}
 
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
 
 	spin_lock_irqsave(&obj->child_list_lock, flags);
 	sync_timeline_get(obj);
-	fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
-		   obj->context, value);
+	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+		       obj->context, value);
 	list_add_tail(&pt->child_list, &obj->child_list_head);
 	INIT_LIST_HEAD(&pt->active_list);
 	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 	return pt;
 }
 
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "sw_sync";
 }
 
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	return parent->name;
 }
 
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 	unsigned long flags;
 
 	spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
 	spin_unlock_irqrestore(fence->lock, flags);
 
 	sync_timeline_put(parent);
-	fence_free(fence);
+	dma_fence_free(fence);
 }
 
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	return (fence->seqno > parent->value) ? false : true;
 }
 
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	if (timeline_fence_signaled(fence))
 		return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
 				    char *str, int size)
 {
 	snprintf(str, size, "%d", fence->seqno);
 }
 
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
 					     char *str, int size)
 {
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
 	snprintf(str, size, "%d", parent->value);
 }
 
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
 	.get_driver_name = timeline_fence_get_driver_name,
 	.get_timeline_name = timeline_fence_get_timeline_name,
 	.enable_signaling = timeline_fence_enable_signaling,
 	.signaled = timeline_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = timeline_fence_release,
 	.fence_value_str = timeline_fence_value_str,
 	.timeline_value_str = timeline_fence_timeline_value_str,
@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
 
 	sync_file = sync_file_create(&pt->base);
 	if (!sync_file) {
-		fence_put(&pt->base);
+		dma_fence_put(&pt->base);
 		err = -ENOMEM;
 		goto err;
 	}

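Out-of-tree fence providers follow the same recipe as sw_sync above: struct fence_ops becomes struct dma_fence_ops and the stock wait implementation becomes dma_fence_default_wait(). A minimal sketch of a converted ops table (the foo_* names are hypothetical, not from this commit):

static const char *foo_get_driver_name(struct dma_fence *fence)
{
	return "foo";
}

static const char *foo_get_timeline_name(struct dma_fence *fence)
{
	return "foo-timeline";
}

static bool foo_enable_signaling(struct dma_fence *fence)
{
	/* arm whatever interrupt or timer will eventually signal the fence */
	return true;
}

static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name   = foo_get_driver_name,
	.get_timeline_name = foo_get_timeline_name,
	.enable_signaling  = foo_enable_signaling,
	.wait              = dma_fence_default_wait,
};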
+ 7 - 6
drivers/dma-buf/sync_debug.c

@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
 	return "error";
 }
 
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+			     struct dma_fence *fence, bool show)
 {
 	int status = 1;
-	struct sync_timeline *parent = fence_parent(fence);
+	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	if (fence_is_signaled_locked(fence))
+	if (dma_fence_is_signaled_locked(fence))
 		status = fence->status;
 
 	seq_printf(s, "  %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
 	int i;
 
 	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-		   sync_status_str(!fence_is_signaled(sync_file->fence)));
+		   sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
 
-	if (fence_is_array(sync_file->fence)) {
-		struct fence_array *array = to_fence_array(sync_file->fence);
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
 
 		for (i = 0; i < array->num_fences; ++i)
 			sync_print_fence(s, array->fences[i], true);

+ 4 - 5
drivers/dma-buf/sync_debug.h

@@ -15,7 +15,7 @@
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <linux/sync_file.h>
 #include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
 	struct list_head	sync_timeline_list;
 };
 
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
 {
-	return container_of(fence->lock, struct sync_timeline,
-			    child_list_lock);
+	return container_of(fence->lock, struct sync_timeline, child_list_lock);
 }
 
 /**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
  * @active_list: sync timeline active child's list
  */
 struct sync_pt {
-	struct fence base;
+	struct dma_fence base;
 	struct list_head child_list;
 	struct list_head active_list;
 };

+ 33 - 30
drivers/dma-buf/sync_file.c

@@ -54,7 +54,7 @@ err:
 	return NULL;
 }
 
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct sync_file *sync_file;
 
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
  * takes ownership of @fence. The sync_file can be released with
 * fput(sync_file->file). Returns the sync_file or NULL in case of error.
 */
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
 {
 	struct sync_file *sync_file;
 
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 	if (!sync_file)
 		return NULL;
 
-	sync_file->fence = fence_get(fence);
+	sync_file->fence = dma_fence_get(fence);
 
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
@@ -121,16 +121,16 @@ err:
 * Ensures @fd references a valid sync_file and returns a fence that
 * represents all fence in the sync_file. On error NULL is returned.
 */
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
 {
 	struct sync_file *sync_file;
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	sync_file = sync_file_fdget(fd);
 	if (!sync_file)
 		return NULL;
 
-	fence = fence_get(sync_file->fence);
+	fence = dma_fence_get(sync_file->fence);
 	fput(sync_file->file);
 
 	return fence;
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
 EXPORT_SYMBOL(sync_file_get_fence);
 
 static int sync_file_set_fence(struct sync_file *sync_file,
-			       struct fence **fences, int num_fences)
+			       struct dma_fence **fences, int num_fences)
 {
-	struct fence_array *array;
+	struct dma_fence_array *array;
 
 	/*
 	 * The reference for the fences in the new sync_file and held
 	 * in add_fence() during the merge procedure, so for num_fences == 1
 	 * we already own a new reference to the fence. For num_fence > 1
-	 * we own the reference of the fence_array creation.
+	 * we own the reference of the dma_fence_array creation.
 	 */
 	if (num_fences == 1) {
 		sync_file->fence = fences[0];
 		kfree(fences);
 	} else {
-		array = fence_array_create(num_fences, fences,
-					   fence_context_alloc(1), 1, false);
+		array = dma_fence_array_create(num_fences, fences,
+					       dma_fence_context_alloc(1),
+					       1, false);
 		if (!array)
 			return -ENOMEM;
 
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
 	return 0;
 }
 
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+				     int *num_fences)
 {
-	if (fence_is_array(sync_file->fence)) {
-		struct fence_array *array = to_fence_array(sync_file->fence);
+	if (dma_fence_is_array(sync_file->fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
 
 		*num_fences = array->num_fences;
 		return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
 	return &sync_file->fence;
 }
 
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+		      int *i, struct dma_fence *fence)
 {
 	fences[*i] = fence;
 
-	if (!fence_is_signaled(fence)) {
-		fence_get(fence);
+	if (!dma_fence_is_signaled(fence)) {
+		dma_fence_get(fence);
 		(*i)++;
 	}
 }
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 					 struct sync_file *b)
 {
 	struct sync_file *sync_file;
-	struct fence **fences, **nfences, **a_fences, **b_fences;
+	struct dma_fence **fences, **nfences, **a_fences, **b_fences;
 	int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
 	sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	 * and sync_file_create, this is a reasonable assumption.
 	 */
 	for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
-		struct fence *pt_a = a_fences[i_a];
-		struct fence *pt_b = b_fences[i_b];
+		struct dma_fence *pt_a = a_fences[i_a];
+		struct dma_fence *pt_b = b_fences[i_b];
 
 		if (pt_a->context < pt_b->context) {
 			add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 		add_fence(fences, &i, b_fences[i_b]);
 
 	if (i == 0)
-		fences[i++] = fence_get(a_fences[0]);
+		fences[i++] = dma_fence_get(a_fences[0]);
 
 	if (num_fences > i) {
 		nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
 						     kref);
 
 	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
-		fence_remove_callback(sync_file->fence, &sync_file->cb);
-	fence_put(sync_file->fence);
+		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
 }
 
@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	if (!poll_does_not_wait(wait) &&
 	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
-		if (fence_add_callback(sync_file->fence, &sync_file->cb,
-				       fence_check_cb_func) < 0)
+		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
 	}
 
-	return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+	return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
 }
 
 static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -370,14 +373,14 @@ err_put_fd:
 	return err;
 }
 
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
 				 struct sync_fence_info *info)
 {
 	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
 		sizeof(info->obj_name));
 	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		info->status = fence->status >= 0 ? 1 : fence->status;
 	else
 		info->status = 0;
@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 {
 	struct sync_file_info info;
 	struct sync_fence_info *fence_info = NULL;
-	struct fence **fences;
+	struct dma_fence **fences;
 	__u32 size;
 	int num_fences, ret, i;
 
@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
 	strlcpy(info.name, sync_file->name, sizeof(info.name));
-	info.status = fence_is_signaled(sync_file->fence);
+	info.status = dma_fence_is_signaled(sync_file->fence);
 	info.num_fences = num_fences;
 
 	if (copy_to_user((void __user *)arg, &info, sizeof(info)))

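On the consumer side the sync_file entry points keep their names; only the fence type they traffic in changes. A sketch of exporting and importing a fence through a sync_file after the rename (error handling trimmed; fence and fd are assumed to come from surrounding driver code):

	/* export: the sync_file takes its own reference via dma_fence_get() */
	struct sync_file *sync_file = sync_file_create(fence);

	/* import: the caller receives a reference and must drop it */
	struct dma_fence *in_fence = sync_file_get_fence(fd);
	if (in_fence) {
		dma_fence_wait(in_fence, true);
		dma_fence_put(in_fence);
	}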
+ 16 - 16
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -34,7 +34,7 @@
 #include <linux/kref.h>
 #include <linux/interval_tree.h>
 #include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -359,7 +359,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	struct fence		        *last_pt_update;
+	struct dma_fence	        *last_pt_update;
 	unsigned			ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -474,7 +474,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct fence		        *fence;
+	struct dma_fence	        *fence;
 };
 
 /*
@@ -613,10 +613,10 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_abo;
-	struct fence			*excl;
+	struct dma_fence		*excl;
 	unsigned			shared_count;
-	struct fence			**shared;
-	struct fence_cb			cb;
+	struct dma_fence		**shared;
+	struct dma_fence_cb		cb;
 	bool				async;
 };
 
@@ -644,7 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f);
+		      struct dma_fence **f);
 
 /*
 * context related structures
@@ -652,7 +652,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		**fences;
+	struct dma_fence	**fences;
 	struct amd_sched_entity	entity;
 };
 
@@ -661,7 +661,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device    *adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
-	struct fence            **fences;
+	struct dma_fence	**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 	bool preamble_presented;
 };
@@ -677,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+			      struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -889,10 +889,10 @@ struct amdgpu_gfx {
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f);
+		    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f);
+		       struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -923,7 +923,7 @@ struct amdgpu_cs_parser {
 	struct amdgpu_bo_list		*bo_list;
 	struct amdgpu_bo_list_entry	vm_pd;
 	struct list_head		validated;
-	struct fence			*fence;
+	struct dma_fence		*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
 	struct amdgpu_bo_list_entry	*evictable;
@@ -943,7 +943,7 @@ struct amdgpu_job {
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
-	struct fence		*fence; /* the hw fence */
+	struct dma_fence	*fence; /* the hw fence */
 	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;

+ 4 - 4
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 				       false);
 		if (r)
 			goto exit_do_move;
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		fence_put(fence);
+		dma_fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
 	if (fence)
-		fence_put(fence);
+		dma_fence_put(fence);
 	return r;
 }
 

+ 8 - 8
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -735,7 +735,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-	fence_put(parser->fence);
+	dma_fence_put(parser->fence);
 
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
@@ -772,7 +772,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct fence *f;
+			struct dma_fence *f;
 
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
@@ -973,7 +973,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 		for (j = 0; j < num_deps; ++j) {
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -995,7 +995,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			} else if (fence) {
 				r = amdgpu_sync_fence(adev, &p->job->sync,
 						      fence);
-				fence_put(fence);
+				dma_fence_put(fence);
 				amdgpu_ctx_put(ctx);
 				if (r)
 					return r;
@@ -1025,7 +1025,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	job->owner = p->filp;
 	job->fence_ctx = entity->fence_context;
-	p->fence = fence_get(&job->base.s_fence->finished);
+	p->fence = dma_fence_get(&job->base.s_fence->finished);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
 	amdgpu_job_free_resources(job);
@@ -1108,7 +1108,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1124,8 +1124,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
 	else if (fence) {
-		r = fence_wait_timeout(fence, true, timeout);
-		fence_put(fence);
+		r = dma_fence_wait_timeout(fence, true, timeout);
+		dma_fence_put(fence);
 	} else
 		r = 1;
 

+ 11 - 11
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
-			      sizeof(struct fence*), GFP_KERNEL);
+			      sizeof(struct dma_fence*), GFP_KERNEL);
 	if (!ctx->fences)
 		return -ENOMEM;
 
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			fence_put(ctx->rings[i].fences[j]);
+			dma_fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence)
+			      struct dma_fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	uint64_t seq = cring->sequence;
 	unsigned idx = 0;
-	struct fence *other = NULL;
+	struct dma_fence *other = NULL;
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
-		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
 		if (r < 0)
 			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
 	}
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
 	cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
-	fence_put(other);
+	dma_fence_put(other);
 
 	return seq;
 }
 
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				   struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				       struct amdgpu_ring *ring, uint64_t seq)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	spin_lock(&ctx->ring_lock);
 
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;

+ 7 - 7
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1620,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
 	adev->gart.gart_funcs = NULL;
-	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
 	adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2215,7 +2215,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
 					   struct amdgpu_ring *ring,
 					   struct amdgpu_bo *bo,
-					   struct fence **fence)
+					   struct dma_fence **fence)
 {
 	uint32_t domain;
 	int r;
@@ -2334,30 +2334,30 @@ retry:
 		if (need_full_reset && amdgpu_need_backup(adev)) {
 			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 			struct amdgpu_bo *bo, *tmp;
-			struct fence *fence = NULL, *next = NULL;
+			struct dma_fence *fence = NULL, *next = NULL;
 
 			DRM_INFO("recover vram bo from shadow\n");
 			mutex_lock(&adev->shadow_list_lock);
 			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
 				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
 				if (fence) {
-					r = fence_wait(fence, false);
+					r = dma_fence_wait(fence, false);
 					if (r) {
 						WARN(r, "recovery from shadow isn't comleted\n");
 						break;
 					}
 				}
 
-				fence_put(fence);
+				dma_fence_put(fence);
 				fence = next;
 			}
 			mutex_unlock(&adev->shadow_list_lock);
 			if (fence) {
-				r = fence_wait(fence, false);
+				r = dma_fence_wait(fence, false);
 				if (r)
 					WARN(r, "recovery from shadow isn't comleted\n");
 			}
-			fence_put(fence);
+			dma_fence_put(fence);
 		}
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];

+ 8 - 8
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -35,29 +35,29 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amdgpu_flip_work *work =
 		container_of(cb, struct amdgpu_flip_work, cb);
 
-	fence_put(f);
+	dma_fence_put(f);
 	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
-				     struct fence **f)
+				     struct dma_fence **f)
 {
-	struct fence *fence= *f;
+	struct dma_fence *fence= *f;
 
 	if (fence == NULL)
 		return false;
 
 	*f = NULL;
 
-	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 		return true;
 
-	fence_put(fence);
+	dma_fence_put(fence);
 	return false;
 }
 
@@ -244,9 +244,9 @@ unreserve:
 
 cleanup:
 	amdgpu_bo_unref(&work->old_abo);
-	fence_put(work->excl);
+	dma_fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
-		fence_put(work->shared[i]);
+		dma_fence_put(work->shared[i]);
 	kfree(work->shared);
 	kfree(work);
 

+ 29 - 29
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

@@ -48,7 +48,7 @@
 */
 
 struct amdgpu_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	/* RB, DMA, etc. */
 	struct amdgpu_ring		*ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
 * Cast helper
 */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence *old, **ptr;
+	struct dma_fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 
 	seq = ++ring->fence_drv.sync_seq;
 	fence->ring = ring;
-	fence_init(&fence->base, &amdgpu_fence_ops,
-		   &ring->fence_drv.lock,
-		   adev->fence_context + ring->idx,
-		   seq);
+	dma_fence_init(&fence->base, &amdgpu_fence_ops,
+		       &ring->fence_drv.lock,
+		       adev->fence_context + ring->idx,
+		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, AMDGPU_FENCE_FLAG_INT);
 
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
 	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !fence_is_signaled(old)) {
+	if (old && !dma_fence_is_signaled(old)) {
 		DRM_INFO("rcu slot is busy\n");
-		fence_wait(old, false);
+		dma_fence_wait(old, false);
 	}
 
-	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
 
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	seq &= drv->num_fences_mask;
 
 	do {
-		struct fence *fence, **ptr;
+		struct dma_fence *fence, **ptr;
 
 		++last_seq;
 		last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = fence_signal(fence);
+		r = dma_fence_signal(fence);
 		if (!r)
-			FENCE_TRACE(fence, "signaled from irq context\n");
+			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 		else
 			BUG();
 
-		fence_put(fence);
+		dma_fence_put(fence);
 	} while (last_seq != seq);
 }
 
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-	struct fence *fence, **ptr;
+	struct dma_fence *fence, **ptr;
 	int r;
 
 	if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	rcu_read_lock();
 	fence = rcu_dereference(*ptr);
-	if (!fence || !fence_get_rcu(fence)) {
+	if (!fence || !dma_fence_get_rcu(fence)) {
 		rcu_read_unlock();
 		return 0;
 	}
 	rcu_read_unlock();
 
-	r = fence_wait(fence, false);
-	fence_put(fence);
+	r = dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[j]);
+			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 * Common fence implementation
 */
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amdgpu";
 }
 
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amdgpu_fence_release,
 };
 

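The amdgpu hunks also show the renamed initialization path: one context number per ring, allocated once with dma_fence_context_alloc(), and each new fence bound to its ring's timeline with dma_fence_init(). Condensed from the hunks above (a sketch, not a complete function):

	/* at device init: one context per possible ring */
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	/* when emitting fence number `seq` on ring `idx` */
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,		/* timeline spinlock */
		       adev->fence_context + ring->idx,	/* ring's context */
		       seq);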
+ 3 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 * Free an IB (all asics).
 */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
-		    struct fence *f)
+		    struct dma_fence *f)
 {
 	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
 * to SI there was just a DE IB.
 */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
-		       struct amdgpu_job *job, struct fence **f)
+		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+		       struct amdgpu_job *job, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];

+ 11 - 11
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	struct fence *f;
+	struct dma_fence *f;
 	unsigned i;
 
 	/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
 
-	fence_put(job->fence);
+	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
-		      struct fence **f)
+		      struct dma_fence **f)
 {
 	int r;
 	job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
 	job->owner = owner;
 	job->fence_ctx = entity->fence_context;
-	*f = fence_get(&job->base.s_fence->finished);
+	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
 }
 
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 
-	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
 
 	if (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	return fence;
 }
 
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
 	int r;
 
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 
 	/* if gpu reset, hw fence will be replaced here */
-	fence_put(job->fence);
-	job->fence = fence_get(fence);
+	dma_fence_put(job->fence);
+	job->fence = dma_fence_get(fence);
 	amdgpu_job_free_resources(job);
 	return fence;
 }

+ 7 - 7
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -391,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		if (adev->mman.buffer_funcs_ring == NULL ||
 		   !adev->mman.buffer_funcs_ring->ready) {
@@ -411,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 		amdgpu_bo_fence(bo, fence, false);
 		amdgpu_bo_unreserve(bo);
-		fence_put(bo->tbo.moving);
-		bo->tbo.moving = fence_get(fence);
-		fence_put(fence);
+		dma_fence_put(bo->tbo.moving);
+		bo->tbo.moving = dma_fence_get(fence);
+		dma_fence_put(fence);
 	}
 	*bo_ptr = bo;
 
@@ -499,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence,
+			       struct dma_fence **fence,
 			       bool direct)
 
 {
@@ -531,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct)
 
 {
@@ -941,7 +941,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 * @shared: true if fence should be added shared
 *
 */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;

+ 4 - 4
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -157,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
 			       struct reservation_object *resv,
-			       struct fence **fence, bool direct);
+			       struct dma_fence **fence, bool direct);
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  struct amdgpu_bo *bo,
 				  struct reservation_object *resv,
-				  struct fence **fence,
+				  struct dma_fence **fence,
 				  bool direct);
 
 
@@ -201,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
			      struct amdgpu_sa_bo **sa_bo,
-			      struct fence *fence);
+			      struct dma_fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@@ -67,7 +67,7 @@ struct amdgpu_fence_driver {
 	struct timer_list		fallback_timer;
 	unsigned			num_fences_mask;
 	spinlock_t			lock;
-	struct fence			**fences;
+	struct dma_fence		**fences;
 };
 
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
@@ -81,7 +81,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

+ 12 - 12
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	fence_put(sa_bo->fence);
+	dma_fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
 		if (sa_bo->fence == NULL ||
-		    !fence_is_signaled(sa_bo->fence)) {
+		    !dma_fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct fence **fences,
+				   struct dma_fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!fence_is_signaled(sa_bo->fence)) {
+		if (!dma_fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned count;
 	int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
 		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 			if (fences[i])
-				fences[count++] = fence_get(fences[i]);
+				fences[count++] = dma_fence_get(fences[i]);
 
 		if (count) {
 			spin_unlock(&sa_manager->wq.lock);
-			t = fence_wait_any_timeout(fences, count, false,
-						   MAX_SCHEDULE_TIMEOUT);
+			t = dma_fence_wait_any_timeout(fences, count, false,
+						       MAX_SCHEDULE_TIMEOUT);
 			for (i = 0; i < count; ++i)
-				fence_put(fences[i]);
+				dma_fence_put(fences[i]);
 
 			r = (t > 0) ? 0 : t;
 			spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct fence *fence)
+		       struct dma_fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !fence_is_signaled(fence)) {
+	if (fence && !dma_fence_is_signaled(fence)) {
 		uint32_t idx;
 
-		(*sa_bo)->fence = fence_get(fence);
+		(*sa_bo)->fence = dma_fence_get(fence);
 		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
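amdgpu_sa_bo_new() above collects at most one unsignaled fence per fence list, then drops its lock and blocks until any of them signals. Condensed from the hunk (same calls and error handling as in the patch):

	spin_unlock(&sa_manager->wq.lock);
	/* returns the remaining timeout in jiffies (> 0) when a fence
	 * signals, 0 on timeout, or a negative error code */
	t = dma_fence_wait_any_timeout(fences, count, false,
				       MAX_SCHEDULE_TIMEOUT);
	for (i = 0; i < count; ++i)
		dma_fence_put(fences[i]);	/* drop the extra references */
	r = (t > 0) ? 0 : t;
	spin_lock(&sa_manager->wq.lock);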

+ 25 - 23
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

@@ -34,7 +34,7 @@
 
 struct amdgpu_sync_entry {
 	struct hlist_node	node;
-	struct fence		*fence;
+	struct dma_fence	*fence;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 *
 * Test if the fence was issued by us.
 */
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+				 struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 *
 * Extract who originally created the fence.
 */
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
 {
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
 *
 * Either keep the existing fence or the new one, depending which one is later.
 */
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+				   struct dma_fence *fence)
 {
-	if (*keep && fence_is_later(*keep, fence))
+	if (*keep && dma_fence_is_later(*keep, fence))
 		return;
 
-	fence_put(*keep);
-	*keep = fence_get(fence);
+	dma_fence_put(*keep);
+	*keep = dma_fence_get(fence);
 }
 
 /**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
 *
 */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f)
+		      struct dma_fence *f)
 {
 	struct amdgpu_sync_entry *e;
 
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return -ENOMEM;
 
 	hash_add(sync->fences, &e->node, f->context);
-	e->fence = fence_get(f);
+	e->fence = dma_fence_get(f);
 	return 0;
 }
 
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     void *owner)
 {
 	struct reservation_object_list *flist;
-	struct fence *f;
+	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
 	int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+					 struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
-		struct fence *f = e->fence;
+		struct dma_fence *f = e->fence;
 		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
 		if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 			 * when they are scheduled.
 			 */
 			if (s_fence->sched == &ring->sched) {
-				if (fence_is_signaled(&s_fence->scheduled))
+				if (dma_fence_is_signaled(&s_fence->scheduled))
 					continue;
 
 				return &s_fence->scheduled;
 			}
 		}
 
-		if (fence_is_signaled(f)) {
+		if (dma_fence_is_signaled(f)) {
 			hash_del(&e->node);
-			fence_put(f);
+			dma_fence_put(f);
 			kmem_cache_free(amdgpu_sync_slab, e);
 			continue;
 		}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 *
 * Get and removes the next fence from the sync object not signaled yet.
 */
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
-	struct fence *f;
+	struct dma_fence *f;
 	int i;
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 		hash_del(&e->node);
 		kmem_cache_free(amdgpu_sync_slab, e);
 
-		if (!fence_is_signaled(f))
+		if (!dma_fence_is_signaled(f))
 			return f;
 
-		fence_put(f);
+		dma_fence_put(f);
 	}
 	return NULL;
 }
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
-		fence_put(e->fence);
+		dma_fence_put(e->fence);
 		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
-	fence_put(sync->last_vm_update);
+	dma_fence_put(sync->last_vm_update);
 }
 
 /**
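The converted amdgpu_sync_get_fence() still hands back a referenced, unsignaled fence that the caller owns. A hedged sketch of the consuming side (illustrative only; the loop and error handling are not from the patch):

	struct dma_fence *f;

	while ((f = amdgpu_sync_get_fence(&sync))) {
		/* dma_fence_wait() returns 0 once the fence signals,
		 * or a negative error code */
		long r = dma_fence_wait(f, false);

		dma_fence_put(f);	/* drop the reference we were handed */
		if (r)
			break;
	}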

+ 5 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h

@@ -26,7 +26,7 @@
 
 #include <linux/hashtable.h>
 
-struct fence;
+struct dma_fence;
 struct reservation_object;
 struct amdgpu_device;
 struct amdgpu_ring;
@@ -36,19 +36,19 @@ struct amdgpu_ring;
 */
 struct amdgpu_sync {
 	DECLARE_HASHTABLE(fences, 4);
-	struct fence	*last_vm_update;
+	struct dma_fence	*last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
+		      struct dma_fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);

+ 6 - 6
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
-		struct fence *fence = NULL;
+		struct dma_fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		fence_put(fence);
+		dma_fence_put(fence);
 
 		r = amdgpu_bo_kmap(vram_obj, &vram_map);
 		if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = fence_wait(fence, false);
+		r = dma_fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		fence_put(fence);
+		dma_fence_put(fence);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -216,7 +216,7 @@ out_lclean:
 			amdgpu_bo_unref(&gtt_obj[i]);
 		}
 		if (fence)
-			fence_put(fence);
+			dma_fence_put(fence);
 		break;
 	}
 

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
 			     __field(struct amdgpu_device *, adev)
 			     __field(struct amd_sched_job *, sched_job)
 			     __field(struct amdgpu_ib *, ib)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
 			     __field(struct amdgpu_device *, adev)
 			     __field(struct amd_sched_job *, sched_job)
 			     __field(struct amdgpu_ib *, ib)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(char *, ring_name)
 			     __field(u32, num_ibs)
 			     ),

+ 9 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -287,7 +287,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	struct drm_mm_node *old_mm, *new_mm;
 	uint64_t old_start, old_size, new_start, new_size;
 	unsigned long num_pages;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	int r;
 
 	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
@@ -313,7 +313,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	num_pages = new_mem->num_pages;
 	while (num_pages) {
 		unsigned long cur_pages = min(old_size, new_size);
-		struct fence *next;
+		struct dma_fence *next;
 
 		r = amdgpu_copy_buffer(ring, old_start, new_start,
 				       cur_pages * PAGE_SIZE,
@@ -321,7 +321,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 		if (r)
 			goto error;
 
-		fence_put(fence);
+		dma_fence_put(fence);
 		fence = next;
 
 		num_pages -= cur_pages;
@@ -353,13 +353,13 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	}
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
-	fence_put(fence);
+	dma_fence_put(fence);
 	return r;
 
 error:
 	if (fence)
-		fence_wait(fence, false);
-	fence_put(fence);
+		dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -1316,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence, bool direct_submit)
+		       struct dma_fence **fence, bool direct_submit)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1363,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (direct_submit) {
 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
 				       NULL, NULL, fence);
-		job->fence = fence_get(*fence);
+		job->fence = dma_fence_get(*fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		amdgpu_job_free(job);
@@ -1384,7 +1384,7 @@ error_free:
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		uint32_t src_data,
 		struct reservation_object *resv,
-		struct fence **fence)
+		struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -78,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct fence **fence, bool direct_submit);
+		       struct dma_fence **fence, bool direct_submit);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
 			struct reservation_object *resv,
-			struct fence **fence);
+			struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);

+ 13 - 13
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			r = amdgpu_uvd_get_destroy_msg(ring, handle,
 						       false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 				continue;
 			}
 
-			fence_wait(fence, false);
-			fence_put(fence);
+			dma_fence_wait(fence, false);
+			dma_fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -912,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
@@ -963,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = fence_get(f);
+		job->fence = dma_fence_get(f);
 		if (r)
 			goto err_free;
 
@@ -978,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
 	if (fence)
-		*fence = fence_get(f);
+		*fence = dma_fence_get(f);
 	amdgpu_bo_unref(&bo);
-	fence_put(f);
+	dma_fence_put(f);
 
 	return 0;
 
@@ -996,7 +996,7 @@ err:
   crash the vcpu so just try to emmit a dummy create/destroy msg to
   avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence)
+			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1046,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1133,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 */
 int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	long r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1148,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = fence_wait_timeout(fence, false, timeout);
+	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -1159,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-	fence_put(fence);
+	dma_fence_put(fence);
 
 error:
 	return r;
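amdgpu_uvd_send_msg() shows the out-parameter convention used throughout the series: the callee hands the caller its own reference via *fence, then drops the local one. Condensed from the hunk above:

	if (fence)
		*fence = dma_fence_get(f);	/* reference for the caller */
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);			/* drop our local reference */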

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence);
+			      struct dma_fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence);
+			       bool direct, struct dma_fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);

+ 13 - 13
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -396,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 * Open up a stream for HW test
 */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence)
+			      struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint64_t dummy;
 	int i, r;
 
@@ -451,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 		ib->ptr[i] = 0x0;
 
 	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-	job->fence = fence_get(f);
+	job->fence = dma_fence_get(f);
 	if (r)
 		goto err;
 
 	amdgpu_job_free(job);
 	if (fence)
-		*fence = fence_get(f);
-	fence_put(f);
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
 	return 0;
 
 err:
@@ -477,12 +477,12 @@ err:
 * Close up a stream for HW test or if userspace failed to do so
 */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence)
+			       bool direct, struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -514,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	if (direct) {
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-		job->fence = fence_get(f);
+		job->fence = dma_fence_get(f);
 		if (r)
 			goto err;
 
@@ -527,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	}
 
 	if (fence)
-		*fence = fence_get(f);
-	fence_put(f);
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
 	return 0;
 
 err:
@@ -965,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	long r;
 
 	/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -984,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = fence_wait_timeout(fence, false, timeout);
+	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -995,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 error:
-	fence_put(fence);
+	dma_fence_put(fence);
 	return r;
 }

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h

@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct fence **fence);
+			      struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct fence **fence);
+			       bool direct, struct dma_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);

+ 40 - 39
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -25,7 +25,7 @@
 *          Alex Deucher
 *          Jerome Glisse
 */
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -199,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct fence *updates = sync->last_vm_update;
+	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
-	struct fence **fences;
+	struct dma_fence **fences;
 	unsigned i;
 	int r = 0;
 
@@ -230,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (&idle->list == &adev->vm_manager.ids_lru) {
 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct fence_array *array;
+		struct dma_fence_array *array;
 		unsigned j;
 
 		for (j = 0; j < i; ++j)
-			fence_get(fences[j]);
+			dma_fence_get(fences[j]);
 
-		array = fence_array_create(i, fences, fence_context,
+		array = dma_fence_array_create(i, fences, fence_context,
 					   seqno, true);
 		if (!array) {
 			for (j = 0; j < i; ++j)
-				fence_put(fences[j]);
+				dma_fence_put(fences[j]);
 			kfree(fences);
 			r = -ENOMEM;
 			goto error;
@@ -248,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 
 		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
-		fence_put(&array->base);
+		dma_fence_put(&array->base);
 		if (r)
 			goto error;
 
@@ -262,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	/* Check if we can use a VMID already assigned to this VM */
 	i = ring->idx;
 	do {
-		struct fence *flushed;
+		struct dma_fence *flushed;
 
 		id = vm->ids[i++];
 		if (i == AMDGPU_MAX_RINGS)
@@ -284,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			continue;
 
 		if (id->last_flush->context != fence_context &&
-		    !fence_is_signaled(id->last_flush))
+		    !dma_fence_is_signaled(id->last_flush))
 			continue;
 
 		flushed  = id->flushed_updates;
 		if (updates &&
-		    (!flushed || fence_is_later(updates, flushed)))
+		    (!flushed || dma_fence_is_later(updates, flushed)))
 			continue;
 
 		/* Good we can use this VMID. Remember this submission as
@@ -320,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r)
 		goto error;
 
-	fence_put(id->first);
-	id->first = fence_get(fence);
+	dma_fence_put(id->first);
+	id->first = dma_fence_get(fence);
 
-	fence_put(id->last_flush);
+	dma_fence_put(id->last_flush);
 	id->last_flush = NULL;
 
-	fence_put(id->flushed_updates);
-	id->flushed_updates = fence_get(updates);
+	dma_fence_put(id->flushed_updates);
+	id->flushed_updates = dma_fence_get(updates);
 
 	id->pd_gpu_addr = job->vm_pd_addr;
 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -398,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 
 	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
 	    amdgpu_vm_is_gpu_reset(adev, id))) {
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
 		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -408,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 			return r;
 
 		mutex_lock(&adev->vm_manager.lock);
-		fence_put(id->last_flush);
+		dma_fence_put(id->last_flush);
 		id->last_flush = fence;
 		mutex_unlock(&adev->vm_manager.lock);
 	}
@@ -542,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
 	unsigned entries;
@@ -583,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		goto error_free;
 
 	amdgpu_bo_fence(bo, fence, true);
-	fence_put(fence);
+	dma_fence_put(fence);
 	return 0;
 
 error_free:
@@ -640,7 +640,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 
 	int r;
 
@@ -750,9 +750,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		goto error_free;
 
 	amdgpu_bo_fence(vm->page_directory, fence, true);
-	fence_put(vm->page_directory_fence);
-	vm->page_directory_fence = fence_get(fence);
-	fence_put(fence);
+	dma_fence_put(vm->page_directory_fence);
+	vm->page_directory_fence = dma_fence_get(fence);
+	dma_fence_put(fence);
 
 	return 0;
 
@@ -938,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
 * Returns 0 for success, -EINVAL for failure.
 */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct fence *exclusive,
+				       struct dma_fence *exclusive,
 				       uint64_t src,
 				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
 				       uint64_t start, uint64_t last,
 				       uint32_t flags, uint64_t addr,
-				       struct fence **fence)
+				       struct dma_fence **fence)
 {
 	struct amdgpu_ring *ring;
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int r;
 
 	memset(&params, 0, sizeof(params));
@@ -1054,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_bo_fence(vm->page_directory, f, true);
 	if (fence) {
-		fence_put(*fence);
-		*fence = fence_get(f);
+		dma_fence_put(*fence);
+		*fence = dma_fence_get(f);
 	}
-	fence_put(f);
+	dma_fence_put(f);
 	return 0;
 
 error_free:
@@ -1083,14 +1083,14 @@ error_free:
 * Returns 0 for success, -EINVAL for failure.
 */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct fence *exclusive,
+				      struct dma_fence *exclusive,
 				      uint32_t gtt_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
 				      uint32_t flags,
 				      struct drm_mm_node *nodes,
-				      struct fence **fence)
+				      struct dma_fence **fence)
 {
 	uint64_t pfn, src = 0, start = mapping->it.start;
 	int r;
@@ -1178,7 +1178,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	uint32_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct fence *exclusive;
+	struct dma_fence *exclusive;
 	int r;
 
 	if (clear) {
@@ -1562,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		kfree(mapping);
 	}
 
-	fence_put(bo_va->last_pt_update);
+	dma_fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 }
 
@@ -1725,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
-	fence_put(vm->page_directory_fence);
+	dma_fence_put(vm->page_directory_fence);
 }
 
 /**
@@ -1749,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 			      &adev->vm_manager.ids_lru);
 	}
 
-	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+	adev->vm_manager.fence_context =
+		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
@@ -1771,8 +1772,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
 		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
 
-		fence_put(adev->vm_manager.ids[i].first);
+		dma_fence_put(adev->vm_manager.ids[i].first);
 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-		fence_put(id->flushed_updates);
+		dma_fence_put(id->flushed_updates);
 	}
}
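The include change at the top of amdgpu_vm.c tracks the rename of the fence-array helper. Condensed from the amdgpu_vm_grab_id() hunks above, the composite-fence usage is:

	#include <linux/dma-fence-array.h>

	struct dma_fence_array *array;

	/* Bundle i fences into one fence that signals when any of them
	 * does (last argument). The array takes over the fences[] buffer
	 * and the references taken just before; on allocation failure the
	 * error path above drops them by hand.
	 */
	array = dma_fence_array_create(i, fences, fence_context,
				       seqno, true);
	if (array)
		/* &array->base is an ordinary struct dma_fence */
		r = amdgpu_sync_fence(ring->adev, sync, &array->base);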

+ 5 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -94,7 +94,7 @@ struct amdgpu_vm {
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
+	struct dma_fence		*page_directory_fence;
 	uint64_t		last_eviction_counter;
 
 	/* array of page tables, one for each page directory entry */
@@ -115,14 +115,14 @@ struct amdgpu_vm {
 
 struct amdgpu_vm_id {
 	struct list_head	list;
-	struct fence		*first;
+	struct dma_fence		*first;
 	struct amdgpu_sync	active;
-	struct fence		*last_flush;
+	struct dma_fence		*last_flush;
 	atomic64_t		owner;
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct dma_fence		*flushed_updates;
 
 	uint32_t                current_gpu_reset_count;
 
@@ -172,7 +172,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;

+ 6 - 6
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -798,7 +798,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	long r;
@@ -824,7 +824,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err2;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
@@ -844,7 +844,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -1564,7 +1564,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	int r, i;
 	u32 tmp;
 	unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1697,7 +1697,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	}
 
 	/* wait for the GPU to finish processing the IB */
-	r = fence_wait(f, false);
+	r = dma_fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto fail;
@@ -1718,7 +1718,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 
 	return r;
 }

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/si_dma.c

@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
-	struct fence *f = NULL;
+	struct dma_fence *f = NULL;
 	unsigned index;
 	u32 tmp = 0;
 	u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	if (r)
 		goto err1;
 
-	r = fence_wait_timeout(f, false, timeout);
+	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 err1:
 	amdgpu_ib_free(adev, &ib, NULL);
-	fence_put(f);
+	dma_fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;

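A note on the four IB-test hunks above: dma_fence_wait_timeout() keeps the old fence_wait_timeout() convention — negative on error, 0 on timeout, remaining jiffies on success — which is why each test maps r == 0 to -ETIMEDOUT. A minimal sketch of the pattern (reading aid only, not part of the patch):

	long r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;	/* fence did not signal in time */
	else if (r > 0)
		r = 0;		/* signaled; remaining jiffies not needed */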
+ 2 - 2
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h

@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
 			     __field(struct amd_sched_job *, sched_job)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
 	    TP_PROTO(struct amd_sched_fence *fence),
 	    TP_ARGS(fence),
 	    TP_STRUCT__entry(
-		    __field(struct fence *, fence)
+		    __field(struct dma_fence *, fence)
 		    ),
 
 	    TP_fast_assign(

+ 34 - 33
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -32,7 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 		return r;
 
 	atomic_set(&entity->fence_seq, 0);
-	entity->fence_context = fence_context_alloc(2);
+	entity->fence_context = dma_fence_context_alloc(2);
 
 	return 0;
 }
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 	kfifo_free(&entity->job_queue);
 }
 
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 	amd_sched_wakeup(entity->sched);
 }
 
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 }
 
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
-	struct fence * fence = entity->dependency;
+	struct dma_fence * fence = entity->dependency;
 	struct amd_sched_fence *s_fence;
 
 	if (fence->context == entity->fence_context) {
 		/* We can ignore fences from ourself */
-		fence_put(entity->dependency);
+		dma_fence_put(entity->dependency);
 		return false;
 	}
 
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		 * Fence is from the same scheduler, only need to wait for
 		 * it to be scheduled
 		 */
-		fence = fence_get(&s_fence->scheduled);
-		fence_put(entity->dependency);
+		fence = dma_fence_get(&s_fence->scheduled);
+		dma_fence_put(entity->dependency);
 		entity->dependency = fence;
-		if (!fence_add_callback(fence, &entity->cb,
-					amd_sched_entity_clear_dep))
+		if (!dma_fence_add_callback(fence, &entity->cb,
+					    amd_sched_entity_clear_dep))
 			return true;
 
 		/* Ignore it when it is already scheduled */
-		fence_put(fence);
+		dma_fence_put(fence);
 		return false;
 	}
 
-	if (!fence_add_callback(entity->dependency, &entity->cb,
-				amd_sched_entity_wakeup))
+	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+				    amd_sched_entity_wakeup))
 		return true;
 
-	fence_put(entity->dependency);
+	dma_fence_put(entity->dependency);
 	return false;
 }
 
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
 	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
 						 finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
-		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
-			fence_put(s_job->s_fence->parent);
+		if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 		}
 	}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct amd_sched_fence *s_fence = s_job->s_fence;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
 	trace_amd_sched_job(sched_job);
-	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
-			   amd_sched_job_finish_cb);
+	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+			       amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 	return entity;
 }
 
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	amd_sched_fence_finished(s_fence);
 
 	trace_amd_sched_process_job(s_fence);
-	fence_put(&s_fence->finished);
+	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
 		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 					 (!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);

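One detail worth calling out in the scheduler hunks above: dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the fence core will never invoke the callback — hence the manual amd_sched_process_job() call in that branch. A reduced sketch of the idiom (reading aid only):

	/* register the completion callback, or run it inline if too late */
	r = dma_fence_add_callback(fence, &s_fence->cb, amd_sched_process_job);
	if (r == -ENOENT)
		amd_sched_process_job(fence, &s_fence->cb);
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);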
+ 13 - 13
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -25,7 +25,7 @@
 #define _GPU_SCHEDULER_H_
 
 #include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
 	atomic_t			fence_seq;
 	uint64_t                        fence_context;
 
-	struct fence			*dependency;
-	struct fence_cb			cb;
+	struct dma_fence		*dependency;
+	struct dma_fence_cb		cb;
 };
 
 /**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-	struct fence                    scheduled;
-	struct fence                    finished;
-	struct fence_cb                 cb;
-	struct fence                    *parent;
+	struct dma_fence                scheduled;
+	struct dma_fence                finished;
+	struct dma_fence_cb             cb;
+	struct dma_fence                *parent;
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
-	struct fence_cb			finish_cb;
+	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
 	struct delayed_work		work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
 {
 	if (f->ops == &amd_sched_fence_ops_scheduled)
 		return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *sched_job);
-	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
 	void (*free_job)(struct amd_sched_job *sched_job);
 };

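The header above also illustrates the standard embedding pattern that survives the rename: a driver-side fence wraps struct dma_fence and recovers the outer object via container_of(), keyed on f->ops, exactly as to_amd_sched_fence() does. A reduced sketch with a hypothetical my_fence type (not from this patch):

	struct my_fence {
		struct dma_fence base;
		void *owner;
	};

	static inline struct my_fence *to_my_fence(struct dma_fence *f)
	{
		/* only valid if f was dma_fence_init()ed with my_fence's ops */
		return container_of(f, struct my_fence, base);
	}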
+ 26 - 22
drivers/gpu/drm/amd/scheduler/sched_fence.c

@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-	fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
-		   &fence->lock, entity->fence_context, seq);
-	fence_init(&fence->finished, &amd_sched_fence_ops_finished,
-		   &fence->lock, entity->fence_context + 1, seq);
+	dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+		       &fence->lock, entity->fence_context, seq);
+	dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+		       &fence->lock, entity->fence_context + 1, seq);
 
 	return fence;
 }
 
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->scheduled);
+	int ret = dma_fence_signal(&fence->scheduled);
 
 	if (!ret)
-		FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
 }
 
 void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->finished);
+	int ret = dma_fence_signal(&fence->finished);
 
 	if (!ret)
-		FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->finished, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
 }
 
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amd_sched";
 }
 
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 	return (const char *)fence->sched->name;
 }
 
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
  */
 static void amd_sched_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(fence->parent);
+	dma_fence_put(fence->parent);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(&fence->scheduled);
+	dma_fence_put(&fence->scheduled);
 }
 
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_scheduled,
 };
 
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_finished,
 };

+ 2 - 1
drivers/gpu/drm/arm/hdlcd_drv.c

@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev)
 		return -EAGAIN;
 	}
 
-	component_match_add(&pdev->dev, &match, compare_dev, port);
+	drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+	of_node_put(port);
 
 	return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
 					       match);

+ 3 - 1
drivers/gpu/drm/arm/malidp_drv.c

@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
 		return -EAGAIN;
 	}
 
-	component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+				   port);
+	of_node_put(port);
 	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
 					       match);
 }

+ 1 - 1
drivers/gpu/drm/armada/armada_drv.c

@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev,
 			continue;
 		}
 
-		component_match_add(dev, match, compare_of, remote);
+		drm_of_component_match_add(dev, match, compare_of, remote);
 		of_node_put(remote);
 	}
 }

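The three conversions above all follow the same shape: drm_of_component_match_add() (backed by component_match_add_release()) now manages the DT node reference on its own, so each caller drops its reference with of_node_put() right after the call. A sketch of the converted probe flow, assuming a single port node (reading aid only):

	struct component_match *match = NULL;

	drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
	of_node_put(port);	/* helper holds its own reference */

	return component_master_add_with_match(&pdev->dev, &master_ops, match);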
+ 7 - 0
drivers/gpu/drm/bridge/Kconfig

@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+config DRM_SIL_SII8620
+	tristate "Silicon Image SII8620 HDMI/MHL bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
 config DRM_SII902X
 	tristate "Silicon Image sii902x RGB/HDMI bridge"
 	depends on OF

+ 1 - 0
drivers/gpu/drm/bridge/Makefile

@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
 obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/

+ 1564 - 0
drivers/gpu/drm/bridge/sil-sii8620.c

@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL	VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+	CM_DISCONNECTED,
+	CM_DISCOVERY,
+	CM_MHL1,
+	CM_MHL3,
+	CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+	SINK_NONE,
+	SINK_HDMI,
+	SINK_DVI
+};
+
+enum sii8620_mt_state {
+	MT_STATE_READY,
+	MT_STATE_BUSY,
+	MT_STATE_DONE
+};
+
+struct sii8620 {
+	struct drm_bridge bridge;
+	struct device *dev;
+	struct clk *clk_xtal;
+	struct gpio_desc *gpio_reset;
+	struct gpio_desc *gpio_int;
+	struct regulator_bulk_data supplies[2];
+	struct mutex lock; /* context lock, protects fields below */
+	int error;
+	enum sii8620_mode mode;
+	enum sii8620_sink_type sink_type;
+	u8 cbus_status;
+	u8 stat[MHL_DST_SIZE];
+	u8 xstat[MHL_XDS_SIZE];
+	u8 devcap[MHL_DCAP_SIZE];
+	u8 xdevcap[MHL_XDC_SIZE];
+	u8 avif[19];
+	struct edid *edid;
+	unsigned int gen2_write_burst:1;
+	enum sii8620_mt_state mt_state;
+	struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+				  struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+	struct list_head node;
+	u8 reg[4];
+	u8 ret;
+	sii8620_mt_msg_cb send;
+	sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+	0x39, /* Main System */
+	0x3d, /* TDM and HSIC */
+	0x49, /* TMDS Receiver, MHL EDID */
+	0x4d, /* eMSC, HDCP, HSIC */
+	0x5d, /* MHL Spec */
+	0x64, /* MHL CBUS */
+	0x59, /* Hardware TPI (Transmitter Programming Interface) */
+	0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+	struct device *dev = ctx->dev;
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 data = addr;
+	struct i2c_msg msg[] = {
+		{
+			.addr = sii8620_i2c_page[addr >> 8],
+			.flags = client->flags,
+			.len = 1,
+			.buf = &data
+		},
+		{
+			.addr = sii8620_i2c_page[addr >> 8],
+			.flags = client->flags | I2C_M_RD,
+			.len = len,
+			.buf = buf
+		},
+	};
+	int ret;
+
+	if (ctx->error)
+		return;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+	if (ret != 2) {
+		dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+			addr, len, ret);
+		ctx->error = ret < 0 ? ret : -EIO;
+	}
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+	u8 ret;
+
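+	/* note: sii8620_read_buf() is a no-op once ctx->error is set, so the
+	 * returned byte is only meaningful while no error is pending
+	 */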
+	sii8620_read_buf(ctx, addr, &ret, 1);
+	return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+			      int len)
+{
+	struct device *dev = ctx->dev;
+	struct i2c_client *client = to_i2c_client(dev);
+	u8 data[2];
+	struct i2c_msg msg = {
+		.addr = sii8620_i2c_page[addr >> 8],
+		.flags = client->flags,
+		.len = len + 1,
+	};
+	int ret;
+
+	if (ctx->error)
+		return;
+
+	if (len > 1) {
+		msg.buf = kmalloc(len + 1, GFP_KERNEL);
+		if (!msg.buf) {
+			ctx->error = -ENOMEM;
+			return;
+		}
+		memcpy(msg.buf + 1, buf, len);
+	} else {
+		msg.buf = data;
+		msg.buf[1] = *buf;
+	}
+
+	msg.buf[0] = addr;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+	if (ret != 1) {
+		dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+			addr, len, buf, ret);
+		ctx->error = ret ?: -EIO;
+	}
+
+	if (len > 1)
+		kfree(msg.buf);
+}
+
+#define sii8620_write(ctx, addr, arr...) \
+({\
+	u8 d[] = { arr }; \
+	sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
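+
+/* e.g. sii8620_write(ctx, reg, 0x01, 0x02) packs the listed values into one
+ * buffer and hands it to sii8620_write_buf(), i.e. a single I2C transfer
+ * starting at the given register address
+ */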
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i += 2)
+		sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+	const u16 d[] = { seq }; \
+	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+	static const u16 d[] = { seq }; \
+	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+	val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+	sii8620_write(ctx, addr, val);
+}
+
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg, *n;
+
+	list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+		list_del(&msg->node);
+		kfree(msg);
+	}
+	ctx->mt_state = MT_STATE_READY;
+}
+
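+/* MSC transactions queue on ctx->mt_queue: in READY the head message is
+ * sent and the state goes BUSY; the interrupt path flips it to DONE, after
+ * which the reply handler (msg->recv) runs here and the next message is
+ * dispatched
+ */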
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg;
+
+	if (ctx->error)
+		return;
+	if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+		return;
+
+	if (ctx->mt_state == MT_STATE_DONE) {
+		ctx->mt_state = MT_STATE_READY;
+		msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+				       node);
+		if (msg->recv)
+			msg->recv(ctx, msg);
+		list_del(&msg->node);
+		kfree(msg);
+	}
+
+	if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+		return;
+
+	ctx->mt_state = MT_STATE_BUSY;
+	msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+	if (msg->send)
+		msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+				    struct sii8620_mt_msg *msg)
+{
+	switch (msg->reg[0]) {
+	case MHL_WRITE_STAT:
+	case MHL_SET_INT:
+		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_WRITE_STAT);
+		break;
+	case MHL_MSC_MSG:
+		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+		sii8620_write(ctx, REG_MSC_COMMAND_START,
+			      BIT_MSC_COMMAND_START_MSC_MSG);
+		break;
+	default:
+		dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+			msg->reg[0]);
+	}
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+	if (!msg)
+		ctx->error = -ENOMEM;
+	else
+		list_add_tail(&msg->node, &ctx->mt_queue);
+
+	return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = cmd;
+	msg->reg[1] = arg1;
+	msg->reg[2] = arg2;
+	msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+	sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+	sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+					struct sii8620_mt_msg *msg)
+{
+	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+			| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN;
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+		REG_EDID_CTRL, ctrl,
+		REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+	);
+}
+
+/* copy src to dst and set changed bits in src */
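+/* (in-place XOR trick: after both XORs *dst holds the new value and *src
+ * holds old ^ new, i.e. a mask of the bits that changed)
+ */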
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+	while (--count >= 0) {
+		*src ^= *dst;
+		*dst++ ^= *src++;
+	}
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+	static const char * const sink_str[] = {
+		[SINK_NONE] = "NONE",
+		[SINK_HDMI] = "HDMI",
+		[SINK_DVI] = "DVI"
+	};
+
+	u8 dcap[MHL_DCAP_SIZE];
+	char sink_name[20];
+	struct device *dev = ctx->dev;
+
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+	if (ctx->error < 0)
+		return;
+
+	dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+		 dcap[MHL_DCAP_MHL_VERSION] / 16,
+		 dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+		 dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+		 dcap[MHL_DCAP_DEVICE_ID_L]);
+	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+	if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+		return;
+
+	sii8620_fetch_edid(ctx);
+	if (!ctx->edid) {
+		dev_err(ctx->dev, "Cannot fetch EDID\n");
+		sii8620_mhl_disconnected(ctx);
+		return;
+	}
+
+	if (drm_detect_hdmi_monitor(ctx->edid))
+		ctx->sink_type = SINK_HDMI;
+	else
+		ctx->sink_type = SINK_DVI;
+
+	drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+	dev_info(dev, "detected sink(type: %s): %s\n",
+		 sink_str[ctx->sink_type], sink_name);
+	sii8620_set_upstream_edid(ctx);
+	sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+			 MHL_XDC_SIZE);
+
+	sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+			      MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+	sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+					struct sii8620_mt_msg *msg)
+{
+	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+		| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+		| BIT_EDID_CTRL_EDID_MODE_EN;
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+			| BIT_INTR9_EDID_ERROR,
+		REG_EDID_CTRL, ctrl,
+		REG_EDID_FIFO_ADDR, 0
+	);
+
+	if (msg->reg[0] == MHL_READ_XDEVCAP)
+		sii8620_mr_xdevcap(ctx);
+	else
+		sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+	if (!msg)
+		return;
+
+	msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+	msg->send = sii8620_mt_read_devcap_send;
+	msg->recv = sii8620_mt_read_devcap_recv;
+}
+
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+	u8 lm_ddc, ddc_cmd, int3, cbus;
+	int fetched, i;
+	int edid_len = EDID_LENGTH;
+	u8 *edid;
+
+	sii8620_readb(ctx, REG_CBUS_STATUS);
+	lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+	ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+	sii8620_write_seq(ctx,
+		REG_INTR9_MASK, 0,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+		REG_HDCP2X_POLL_CS, 0x71,
+		REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+		REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+	);
+
+	for (i = 0; i < 256; ++i) {
+		u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+		if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+			break;
+		sii8620_write(ctx, REG_DDC_STATUS,
+			      BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+	}
+
+	sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (!edid) {
+		ctx->error = -ENOMEM;
+		return;
+	}
+
+#define FETCH_SIZE 16
+	for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+		sii8620_readb(ctx, REG_DDC_STATUS);
+		sii8620_write_seq(ctx,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+			REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+		);
+		sii8620_write_seq(ctx,
+			REG_DDC_SEGM, fetched >> 8,
+			REG_DDC_OFFSET, fetched & 0xff,
+			REG_DDC_DIN_CNT1, FETCH_SIZE,
+			REG_DDC_DIN_CNT2, 0,
+			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+		);
+
+		do {
+			int3 = sii8620_readb(ctx, REG_INTR3);
+			cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+			if (int3 & BIT_DDC_CMD_DONE)
+				break;
+
+			if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+				kfree(edid);
+				edid = NULL;
+				goto end;
+			}
+		} while (1);
+
+		sii8620_readb(ctx, REG_DDC_STATUS);
+		while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+			usleep_range(10, 20);
+
+		sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
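+		/* after the base block, grow the buffer by one EDID_LENGTH
+		 * per extension block advertised in the base EDID
+		 */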
+		if (fetched + FETCH_SIZE == EDID_LENGTH) {
+			u8 ext = ((struct edid *)edid)->extensions;
+
+			if (ext) {
+				u8 *new_edid;
+
+				edid_len += ext * EDID_LENGTH;
+				new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+				if (!new_edid) {
+					kfree(edid);
+					ctx->error = -ENOMEM;
+					return;
+				}
+				edid = new_edid;
+			}
+		}
+
+		if (fetched + FETCH_SIZE == edid_len)
+			sii8620_write(ctx, REG_INTR3, int3);
+	}
+
+	sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+	kfree(ctx->edid);
+	ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+			| BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+	sii8620_write_seq_static(ctx,
+		REG_RX_HDMI_CTRL3, 0x00,
+		REG_PKT_FILTER_0, 0xFF,
+		REG_PKT_FILTER_1, 0xFF,
+		REG_ALICE0_BW_I2C, 0x06
+	);
+
+	sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+			BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN,
+		REG_EDID_FIFO_ADDR, 0,
+	);
+
+	sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+			  (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+			| BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+			| BIT_EDID_CTRL_EDID_MODE_EN,
+		REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+		REG_INTR9_MASK, 0
+	);
+}
+
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+	static const struct {
+		unsigned int rate;
+		u8 div;
+		u8 tp1;
+	} rates[] = {
+		{ 19200, 0x04, 0x53 },
+		{ 20000, 0x04, 0x62 },
+		{ 24000, 0x05, 0x75 },
+		{ 30000, 0x06, 0x92 },
+		{ 38400, 0x0c, 0xbc },
+	};
+	unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+		if (rate <= rates[i].rate)
+			break;
+
+	if (rate != rates[i].rate)
+		dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+			rate, rates[i].rate);
+
+	sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+	sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret)
+		return ret;
+	usleep_range(10000, 20000);
+	return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+	clk_disable_unprepare(ctx->clk_xtal);
+	gpiod_set_value(ctx->gpio_reset, 1);
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	usleep_range(5000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 1);
+	usleep_range(10000, 20000);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+			| BIT_PWD_SRST_CBUS_RST_SW_EN,
+		REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+	);
+}
+
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+	if (ctx->mode != CM_MHL1) {
+		sii8620_write_seq_static(ctx,
+			REG_TX_ZONE_CTL1, 0x0,
+			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+				| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+				| BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+		);
+	} else {
+		sii8620_write_seq_static(ctx,
+			REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+				| BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+		);
+	}
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+	u8 uninitialized_var(val);
+
+	sii8620_write_seq_static(ctx,
+		REG_TPI_INTR_EN, 0,
+		REG_HDCP2X_INTR0_MASK, 0,
+		REG_TPI_COPP_DATA2, 0,
+		REG_TPI_INTR_ST0, ~0,
+	);
+
+	switch (ctx->sink_type) {
+	case SINK_DVI:
+		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+			| BIT_TPI_SC_TPI_AV_MUTE;
+		break;
+	case SINK_HDMI:
+		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+			| BIT_TPI_SC_TPI_AV_MUTE
+			| BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+		break;
+	default:
+		return;
+	}
+
+	sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+			| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+		REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+			| BIT_VID_OVRRD_M1080P_OVRRD,
+		REG_VID_MODE, 0,
+		REG_MHL_TOP_CTL, 0x1,
+		REG_MHLTX_CTL6, 0xa0,
+		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+	);
+
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+			      MHL_DST_LM_CLK_MODE_NORMAL |
+			      MHL_DST_LM_PATH_ENABLED);
+
+	sii8620_set_auto_zone(ctx);
+
+	sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+	sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+			  ARRAY_SIZE(ctx->avif));
+
+	sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+	if (ctx->mode < CM_MHL3)
+		sii8620_stop_video(ctx);
+
+	switch (ctx->sink_type) {
+	case SINK_HDMI:
+		sii8620_start_hdmi(ctx);
+		break;
+	case SINK_DVI:
+	default:
+		break;
+	}
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+	sii8620_write_seq_static(ctx,
+		REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+		REG_INTR8_MASK, 0
+	);
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+	sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+			BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+			| BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+	sii8620_write_seq_static(ctx,
+		REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+			| BIT_HPD_CTRL_HPD_HIGH,
+	);
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+	);
+	ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+	if (!ctx->gen2_write_burst)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_CTRL, 0,
+		REG_MDT_RCV_CTRL, 0
+	);
+	ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+			| BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+			| BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+			| BIT_MDT_XMIT_SM_ERROR,
+		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+			| BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+			| BIT_MDT_RFIFO_DATA_RDY
+	);
+	sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+		REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+			| BIT_MHL_EST_INT
+			| BIT_NOT_MHL_EST_INT
+			| BIT_CBUS_MHL3_DISCON_INT
+			| BIT_CBUS_MHL12_DISCON_INT
+			| BIT_RGND_READY_INT,
+		REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+			| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+			| BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+		REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+			| BIT_MHL_DP_CTL0_TX_OE_OVR,
+		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+		REG_MHL_DP_CTL1, 0xA2,
+		REG_MHL_DP_CTL2, 0x03,
+		REG_MHL_DP_CTL3, 0x35,
+		REG_MHL_DP_CTL5, 0x02,
+		REG_MHL_DP_CTL6, 0x02,
+		REG_MHL_DP_CTL7, 0x03,
+		REG_COC_CTLC, 0xFF,
+		REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+			| BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+		REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+			| BIT_COC_CALIBRATION_DONE,
+		REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+			| BIT_CBUS_CMD_ABORT,
+		REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+			| BIT_CBUS_HPD_CHG
+			| BIT_CBUS_MSC_MR_WRITE_STAT
+			| BIT_CBUS_MSC_MR_MSC_MSG
+			| BIT_CBUS_MSC_MR_WRITE_BURST
+			| BIT_CBUS_MSC_MR_SET_INT
+			| BIT_CBUS_MSC_MT_DONE_NACK
+	);
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+	if (ctx->mode == CM_MHL3)
+		sii8620_write_seq_static(ctx,
+			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+			REG_EMSCINTRMASK1,
+				BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+		);
+	else
+		sii8620_write_seq_static(ctx,
+			REG_HDCP2X_INTR0_MASK, 0x00,
+			REG_EMSCINTRMASK1, 0x00,
+			REG_HDCP2X_INTR0, 0xFF,
+			REG_INTR1, 0xFF,
+			REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+				| BIT_SYS_CTRL1_TX_CTRL_HDMI
+		);
+}
+
+#define SII8620_MHL_VERSION			0x32
+#define SII8620_SCRATCHPAD_SIZE			16
+#define SII8620_INT_STAT_SIZE			0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+	static const u8 devcap[MHL_DCAP_SIZE] = {
+		[MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+		[MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+		[MHL_DCAP_ADOPTER_ID_H] = 0x01,
+		[MHL_DCAP_ADOPTER_ID_L] = 0x41,
+		[MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+			| MHL_DCAP_VID_LINK_PPIXEL
+			| MHL_DCAP_VID_LINK_16BPP,
+		[MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+		[MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+		[MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+		[MHL_DCAP_BANDWIDTH] = 0x0f,
+		[MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+			| MHL_DCAP_FEATURE_RAP_SUPPORT
+			| MHL_DCAP_FEATURE_SP_SUPPORT,
+		[MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+		[MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+	};
+	static const u8 xdcap[MHL_XDC_SIZE] = {
+		[MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+			| MHL_XDC_ECBUS_S_8BIT,
+		[MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+			| MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+		[MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+		[MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+	};
+
+	sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+	sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_CBUS_MSC_COMPAT_CTRL,
+			BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+	);
+
+	sii8620_peer_specific_init(ctx);
+
+	sii8620_disable_hpd(ctx);
+
+	sii8620_write_seq_static(ctx,
+		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+		REG_TMDS0_CCTRL1, 0x90,
+		REG_TMDS_CLK_EN, 0x01,
+		REG_TMDS_CH_EN, 0x11,
+		REG_BGR_BIAS, 0x87,
+		REG_ALICE0_ZONE_CTRL, 0xE8,
+		REG_ALICE0_MODE_CTRL, 0x04,
+	);
+	sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+	sii8620_write_seq_static(ctx,
+		REG_TPI_HW_OPT3, 0x76,
+		REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+		REG_TPI_DTD_B2, 79,
+	);
+	sii8620_set_dev_cap(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_MDT_XMIT_TIMEOUT, 100,
+		REG_MDT_XMIT_CTRL, 0x03,
+		REG_MDT_XFIFO_STAT, 0x00,
+		REG_MDT_RCV_TIMEOUT, 100,
+		REG_CBUS_LINK_CTRL_8, 0x1D,
+	);
+
+	sii8620_start_gen2_write_burst(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_BIST_CTRL, 0x00,
+		REG_COC_CTL1, 0x10,
+		REG_COC_CTL2, 0x18,
+		REG_COC_CTLF, 0x07,
+		REG_COC_CTL11, 0xF8,
+		REG_COC_CTL17, 0x61,
+		REG_COC_CTL18, 0x46,
+		REG_COC_CTL19, 0x15,
+		REG_COC_CTL1A, 0x01,
+		REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+		REG_MHL_COC_CTL4, 0x2D,
+		REG_MHL_COC_CTL5, 0xF9,
+		REG_MSC_HEARTBEAT_CTRL, 0x27,
+	);
+	sii8620_disable_gen2_write_burst(ctx);
+
+	/* currently MHL3 is not supported, so we force version to 0 */
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+			      MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+			      | MHL_DST_CONN_POW_STAT);
+	sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+	if (ctx->mode == mode)
+		return;
+
+	ctx->mode = mode;
+
+	switch (mode) {
+	case CM_MHL1:
+		sii8620_write_seq_static(ctx,
+			REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+			REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+				| BIT_DPD_OSC_EN,
+			REG_COC_INTR_MASK, 0
+		);
+		break;
+	case CM_MHL3:
+		sii8620_write_seq_static(ctx,
+			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+			REG_COC_CTL0, 0x40,
+			REG_MHL_COC_CTL1, 0x07
+		);
+		break;
+	case CM_DISCONNECTED:
+		break;
+	default:
+		dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+		break;
+	}
+
+	sii8620_set_auto_zone(ctx);
+
+	if (mode != CM_MHL1)
+		return;
+
+	sii8620_write_seq_static(ctx,
+		REG_MHL_DP_CTL0, 0xBC,
+		REG_MHL_DP_CTL1, 0xBB,
+		REG_MHL_DP_CTL3, 0x48,
+		REG_MHL_DP_CTL5, 0x39,
+		REG_MHL_DP_CTL2, 0x2A,
+		REG_MHL_DP_CTL6, 0x2A,
+		REG_MHL_DP_CTL7, 0x08
+	);
+}
+
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+	sii8620_disable_gen2_write_burst(ctx);
+	sii8620_stop_video(ctx);
+	msleep(50);
+	sii8620_cbus_reset(ctx);
+	sii8620_set_mode(ctx, CM_DISCONNECTED);
+	sii8620_write_seq_static(ctx,
+		REG_COC_CTL0, 0x40,
+		REG_CBUS3_CNVT, 0x84,
+		REG_COC_CTL14, 0x00,
+		REG_COC_CTL0, 0x40,
+		REG_HRXCTRL3, 0x07,
+		REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+			| BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+			| BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+		REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+			| BIT_MHL_DP_CTL0_TX_OE_OVR,
+		REG_MHL_DP_CTL1, 0xBB,
+		REG_MHL_DP_CTL3, 0x48,
+		REG_MHL_DP_CTL5, 0x3F,
+		REG_MHL_DP_CTL2, 0x2F,
+		REG_MHL_DP_CTL6, 0x2A,
+		REG_MHL_DP_CTL7, 0x03
+	);
+	sii8620_disable_hpd(ctx);
+	sii8620_write_seq_static(ctx,
+		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+		REG_MHL_COC_CTL1, 0x07,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_DISC_CTRL8, 0x00,
+		REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+			| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+		REG_INT_CTRL, 0x00,
+		REG_MSC_HEARTBEAT_CTRL, 0x27,
+		REG_DISC_CTRL1, 0x25,
+		REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+		REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+		REG_MDT_INT_1, 0xff,
+		REG_MDT_INT_1_MASK, 0x00,
+		REG_MDT_INT_0, 0xff,
+		REG_MDT_INT_0_MASK, 0x00,
+		REG_COC_INTR, 0xff,
+		REG_COC_INTR_MASK, 0x00,
+		REG_TRXINTH, 0xff,
+		REG_TRXINTMH, 0x00,
+		REG_CBUS_INT_0, 0xff,
+		REG_CBUS_INT_0_MASK, 0x00,
+		REG_CBUS_INT_1, 0xff,
+		REG_CBUS_INT_1_MASK, 0x00,
+		REG_EMSCINTR, 0xff,
+		REG_EMSCINTRMASK, 0x00,
+		REG_EMSCINTR1, 0xff,
+		REG_EMSCINTRMASK1, 0x00,
+		REG_INTR8, 0xff,
+		REG_INTR8_MASK, 0x00,
+		REG_TPI_INTR_ST0, 0xff,
+		REG_TPI_INTR_EN, 0x00,
+		REG_HDCP2X_INTR0, 0xff,
+		REG_HDCP2X_INTR0_MASK, 0x00,
+		REG_INTR9, 0xff,
+		REG_INTR9_MASK, 0x00,
+		REG_INTR3, 0xff,
+		REG_INTR3_MASK, 0x00,
+		REG_INTR5, 0xff,
+		REG_INTR5_MASK, 0x00,
+		REG_INTR2, 0xff,
+		REG_INTR2_MASK, 0x00,
+	);
+	memset(ctx->stat, 0, sizeof(ctx->stat));
+	memset(ctx->xstat, 0, sizeof(ctx->xstat));
+	memset(ctx->devcap, 0, sizeof(ctx->devcap));
+	memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+	ctx->cbus_status = 0;
+	ctx->sink_type = SINK_NONE;
+	kfree(ctx->edid);
+	ctx->edid = NULL;
+	sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+		REG_CBUS_MSC_COMPAT_CTRL,
+			BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+	);
+	sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+	if (stat & VAL_CBUS_MHL_DISCON)
+		sii8620_mhl_disconnected(ctx);
+
+	if (stat & BIT_RGND_READY_INT) {
+		u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+		if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+			sii8620_mhl_discover(ctx);
+		} else {
+			sii8620_write_seq_static(ctx,
+				REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+					| BIT_DISC_CTRL9_NOMHL_EST
+					| BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+				REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+					| BIT_CBUS_MHL3_DISCON_INT
+					| BIT_CBUS_MHL12_DISCON_INT
+					| BIT_NOT_MHL_EST_INT
+			);
+		}
+	}
+	if (stat & BIT_MHL_EST_INT)
+		sii8620_mhl_init(ctx);
+
+	sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+	if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+		dev_dbg(ctx->dev, "HAWB idle\n");
+
+	sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+		sii8620_set_mode(ctx, CM_MHL1);
+		sii8620_peer_specific_init(ctx);
+		sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+			       | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+	}
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+				      MHL_DST_LM_CLK_MODE_NORMAL
+				      | MHL_DST_LM_PATH_ENABLED);
+		sii8620_mt_read_devcap(ctx, false);
+	} else {
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+				      MHL_DST_LM_CLK_MODE_NORMAL);
+	}
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+	u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+	sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+	sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+	if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+		sii8620_status_changed_dcap(ctx);
+
+	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+		sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+	u8 ints[MHL_INT_SIZE];
+
+	sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+	sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+	struct device *dev = ctx->dev;
+
+	if (list_empty(&ctx->mt_queue)) {
+		dev_err(dev, "unexpected MSC MT response\n");
+		return NULL;
+	}
+
+	return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+	if (!msg)
+		return;
+
+	msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+	ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+	struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+	u8 buf[2];
+
+	if (!msg)
+		return;
+
+	sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+	switch (buf[0]) {
+	case MHL_MSC_MSG_RAPK:
+		msg->ret = buf[1];
+		ctx->mt_state = MT_STATE_DONE;
+		break;
+	default:
+		dev_err(ctx->dev, "%s message type %d,%d not supported\n",
+			__func__, buf[0], buf[1]);
+	}
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
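+	/*
+	 * ack everything except HPD change, which is only acked once the
+	 * latched CBUS status has been compared below
+	 */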
+	if (stat & ~BIT_CBUS_HPD_CHG)
+		sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+	if (stat & BIT_CBUS_HPD_CHG) {
+		u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+		if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+			sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+		} else {
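+			/*
+			 * HPD level did not actually change; BIT_CBUS_HPD_CHG
+			 * and BIT_CBUS_STATUS_CBUS_HPD are both BIT(2), so the
+			 * XORs clear the change flag and invert the cached
+			 * HPD level.
+			 */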
+			stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+			cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+		}
+		ctx->cbus_status = cbus_stat;
+	}
+
+	if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+		sii8620_msc_mr_write_stat(ctx);
+
+	if (stat & BIT_CBUS_MSC_MR_SET_INT)
+		sii8620_msc_mr_set_int(ctx);
+
+	if (stat & BIT_CBUS_MSC_MT_DONE)
+		sii8620_msc_mt_done(ctx);
+
+	if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+		sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+	sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+	sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+	sii8620_write(ctx, REG_INTR9, stat);
+
+	if (stat & BIT_INTR9_DEVCAP_DONE)
+		ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+	sii8620_write_seq_static(ctx,
+		REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+		REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+	);
+}
+
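+/*
+ * sync detect lost: clear the latched AVI infoframe, stop the video path
+ * and mask the infoframe interrupts
+ */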
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+	sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+		      BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+		      BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+	sii8620_stop_video(ctx);
+
+	sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+	if (stat & BIT_INTR_SCDT_CHANGE) {
+		u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+			sii8620_scdt_high(ctx);
+		else
+			sii8620_scdt_low(ctx);
+	}
+
+	sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+	u8 vsif[11];
+
+	sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+		      VAL_RX_HDMI_CTRL2_DEFVAL |
+		      BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+			 ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+	sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+			 ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+	u8 stat = sii8620_readb(ctx, REG_INTR8)
+		& (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+	sii8620_write(ctx, REG_INTR8, stat);
+
+	if (stat & BIT_CEA_NEW_VSI)
+		sii8620_new_vsi(ctx);
+
+	if (stat & BIT_CEA_NEW_AVI)
+		sii8620_new_avi(ctx);
+
+	if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+		sii8620_start_video(ctx);
+}
+
+/* endian-agnostic, non-volatile version of test_bit() */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+	return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+	static const struct {
+		int bit;
+		void (*handler)(struct sii8620 *ctx);
+	} irq_vec[] = {
+		{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+		{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+		{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+		{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+		{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+	};
+	struct sii8620 *ctx = data;
+	u8 stats[LEN_FAST_INTR_STAT];
+	int i, ret;
+
+	mutex_lock(&ctx->lock);
+
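+	/* dispatch: irq_vec bit numbers index across the 7-byte status block */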
+	sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+	for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+		if (sii8620_test_bit(irq_vec[i].bit, stats))
+			irq_vec[i].handler(ctx);
+
+	sii8620_mt_work(ctx);
+
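+	/* the register accessors accumulate I2C errors; check them once per IRQ */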
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+		sii8620_mhl_disconnected(ctx);
+	}
+	mutex_unlock(&ctx->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+	struct device *dev = ctx->dev;
+	u8 ver[5];
+	int ret;
+
+	ret = sii8620_hw_on(ctx);
+	if (ret) {
+		dev_err(dev, "Error powering on, %d.\n", ret);
+		return;
+	}
+	sii8620_hw_reset(ctx);
+
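+	/* regs 0x0000..0x0004 hold the vendor ID, device ID and chip revision */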
+	sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+		 ver[3], ver[2], ver[4]);
+
+	sii8620_write(ctx, REG_DPD,
+		      BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+	sii8620_xtal_set_rate(ctx);
+	sii8620_disconnect(ctx);
+
+	sii8620_write_seq_static(ctx,
+		REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+			| VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+		REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+		REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+	);
+
+	ret = sii8620_clear_error(ctx);
+	if (ret) {
+		dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+		return;
+	}
+
+	enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct sii8620, bridge);
+}
+
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct sii8620 *ctx = bridge_to_sii8620(bridge);
+	bool ret = false;
+	int max_clock = 74250;
+
+	mutex_lock(&ctx->lock);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		goto out;
+
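+	/* mode->clock is in kHz: 74.25 MHz limit, 300 MHz with packed pixel */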
+	if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+		max_clock = 300000;
+
+	ret = mode->clock <= max_clock;
+
+out:
+	mutex_unlock(&ctx->lock);
+
+	return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+	.mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct sii8620 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->dev = dev;
+	mutex_init(&ctx->lock);
+	INIT_LIST_HEAD(&ctx->mt_queue);
+
+	ctx->clk_xtal = devm_clk_get(dev, "xtal");
+	if (IS_ERR(ctx->clk_xtal)) {
+		dev_err(dev, "failed to get xtal clock from DT\n");
+		return PTR_ERR(ctx->clk_xtal);
+	}
+
+	if (!client->irq) {
+		dev_err(dev, "no irq provided\n");
+		return -EINVAL;
+	}
+	irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+					sii8620_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"sii8620", ctx);
+	if (ret)
+		return ret;
+
+	ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->gpio_reset)) {
+		dev_err(dev, "failed to get reset gpio from DT\n");
+		return PTR_ERR(ctx->gpio_reset);
+	}
+
+	ctx->supplies[0].supply = "cvcc10";
+	ctx->supplies[1].supply = "iovcc18";
+	ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+	if (ret)
+		return ret;
+
+	i2c_set_clientdata(client, ctx);
+
+	ctx->bridge.funcs = &sii8620_bridge_funcs;
+	ctx->bridge.of_node = dev->of_node;
+	drm_bridge_add(&ctx->bridge);
+
+	sii8620_cable_in(ctx);
+
+	return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+	struct sii8620 *ctx = i2c_get_clientdata(client);
+
+	disable_irq(to_i2c_client(ctx->dev)->irq);
+	drm_bridge_remove(&ctx->bridge);
+	sii8620_hw_off(ctx);
+
+	return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+	{ .compatible = "sil,sii8620" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+	{ "sii8620", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+
+static struct i2c_driver sii8620_driver = {
+	.driver = {
+		.name	= "sii8620",
+		.of_match_table = of_match_ptr(sii8620_dt_match),
+	},
+	.probe		= sii8620_probe,
+	.remove		= sii8620_remove,
+	.id_table	= sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");

+ 1517 - 0
drivers/gpu/drm/bridge/sil-sii8620.h

@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL				0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH				0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL				0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH				0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV				0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510			0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1				0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET		BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN			BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET		BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD		BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN		BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN		BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI		BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET		BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD					0x000b
+#define BIT_DPD_PWRON_PLL			BIT(7)
+#define BIT_DPD_PDNTX12				BIT(6)
+#define BIT_DPD_PDNRX12				BIT(5)
+#define BIT_DPD_OSC_EN				BIT(4)
+#define BIT_DPD_PWRON_HSIC			BIT(3)
+#define BIT_DPD_PDIDCK_N			BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N			BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL				0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE			BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE			BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE			BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL			BIT(4)
+#define BIT_DCTL_TRANSCODE			BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE		BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL		BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE			BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST				0x000e
+#define BIT_PWD_SRST_COC_DOC_RST		BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW		BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN		BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST		BIT(4)
+#define BIT_PWD_SRST_CBUS_RST			BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO		BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST		BIT(1)
+#define BIT_PWD_SRST_SW_RST			BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1				0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL				0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE				0x004a
+#define BIT_VID_MODE_M1080P			BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD				0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE		BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD		BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON		BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK	BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN	BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD		BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD	BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR			0x0057
+#define REG_PAGE7_ADDR				0x0058
+#define REG_PAGE8_ADDR				0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT			0x005f
+#define LEN_FAST_INTR_STAT			7
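+/* bit numbers index across the whole 7-byte block (see sii8620_test_bit) */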
+#define BIT_FAST_INTR_STAT_TIMR			8
+#define BIT_FAST_INTR_STAT_INT2			9
+#define BIT_FAST_INTR_STAT_DDC			10
+#define BIT_FAST_INTR_STAT_SCDT			11
+#define BIT_FAST_INTR_STAT_INFR			13
+#define BIT_FAST_INTR_STAT_EDID			14
+#define BIT_FAST_INTR_STAT_HDCP			15
+#define BIT_FAST_INTR_STAT_MSC			16
+#define BIT_FAST_INTR_STAT_MERR			17
+#define BIT_FAST_INTR_STAT_G2WB			18
+#define BIT_FAST_INTR_STAT_G2WB_ERR		19
+#define BIT_FAST_INTR_STAT_DISC			28
+#define BIT_FAST_INTR_STAT_BLOCK		30
+#define BIT_FAST_INTR_STAT_LTRN			31
+#define BIT_FAST_INTR_STAT_HDCP2		32
+#define BIT_FAST_INTR_STAT_TDM			42
+#define BIT_FAST_INTR_STAT_COC			51
+
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1				0x006e
+#define BIT_CTRL1_GPIO_I_8			BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8			BIT(4)
+#define BIT_CTRL1_GPIO_I_7			BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7			BIT(2)
+#define BIT_CTRL1_GPIO_I_6			BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6			BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL				0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP		BIT(7)
+#define BIT_INT_CTRL_INTR_OD			BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY		BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE				0x0070
+#define BIT_INTR_STATE_INTR_STATE		BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1				0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2				0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3				0x0073
+#define BIT_DDC_CMD_DONE			BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5				0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK				0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK				0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK				0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK				0x0078
+#define BIT_INTR_SCDT_CHANGE			BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL				0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL		BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN		BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH			BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN		BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1			BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1			BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0			BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0			BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL				0x007a
+#define BIT_CTRL_GPIO_I_5			BIT(7)
+#define BIT_CTRL_GPIO_OEN_5			BIT(6)
+#define BIT_CTRL_GPIO_I_4			BIT(5)
+#define BIT_CTRL_GPIO_OEN_4			BIT(4)
+#define BIT_CTRL_GPIO_I_3			BIT(3)
+#define BIT_CTRL_GPIO_OEN_3			BIT(2)
+#define BIT_CTRL_GPIO_I_2			BIT(1)
+#define BIT_CTRL_GPIO_OEN_2			BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7				0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8				0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK				0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK				0x007e
+#define BIT_CEA_NEW_VSI				BIT(2)
+#define BIT_CEA_NEW_AVI				BIT(1)
+
+/* IEEE, default value: 0x10 */
+#define REG_TMDS_CCTRL				0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE			BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4				0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL		BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT		BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL				0x00bb
+#define BIT_RXBIST_VGB_EN			BIT(7)
+#define BIT_TXBIST_VGB_EN			BIT(6)
+#define BIT_BIST_START_SEL			BIT(5)
+#define BIT_BIST_START_BIT			BIT(4)
+#define BIT_BIST_ALWAYS_ON			BIT(3)
+#define BIT_BIST_TRANS				BIT(2)
+#define BIT_BIST_RESET				BIT(1)
+#define BIT_BIST_EN				BIT(0)
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_TEST_SEL			0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL		0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE			0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0	0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0			0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1			0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2			0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN			0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC				0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED		BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN		BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW			BIT(2)
+#define BIT_LM_DDC_DDC_GRANT			BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST		BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL				0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC			BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL			BIT(6)
+#define BIT_DDC_MANUAL_DSDA			BIT(5)
+#define BIT_DDC_MANUAL_DSCL			BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN		BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP		BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA			BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL			BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR				0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR			0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM				0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET				0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1			0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2			0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8	0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS				0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW		BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK		BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG		BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL		BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY		BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_SUE	BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE	BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD				0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN			BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN			BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN			BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD			0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK		0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO		0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT		0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA				0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT			0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8	BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT	0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT			0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL				0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL		BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL		BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK		0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE		BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK		BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR			0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR				0x00fc
+#define REG_PAGE2_ADDR				0x00fd
+#define REG_PAGE3_ADDR				0x00fe
+#define REG_HW_TPI_ADDR				0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST				0x0100
+#define BIT_UTSRST_FC_SRST			BIT(5)
+#define BIT_UTSRST_KEEPER_SRST			BIT(4)
+#define BIT_UTSRST_HTX_SRST			BIT(3)
+#define BIT_UTSRST_TRX_SRST			BIT(2)
+#define BIT_UTSRST_TTX_SRST			BIT(1)
+#define BIT_UTSRST_HRX_SRST			BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3				0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL		0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN			BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN			BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET		BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL				0x0111
+#define REG_HRXINTH				0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB				0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0		0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT	BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0		0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS				0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS				0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS				0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL				0x0136
+#define BIT_TTXINTL_TTX_INTR7			BIT(7)
+#define BIT_TTXINTL_TTX_INTR6			BIT(6)
+#define BIT_TTXINTL_TTX_INTR5			BIT(5)
+#define BIT_TTXINTL_TTX_INTR4			BIT(4)
+#define BIT_TTXINTL_TTX_INTR3			BIT(3)
+#define BIT_TTXINTL_TTX_INTR2			BIT(2)
+#define BIT_TTXINTL_TTX_INTR1			BIT(1)
+#define BIT_TTXINTL_TTX_INTR0			BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH				0x0137
+#define BIT_TTXINTH_TTX_INTR15			BIT(7)
+#define BIT_TTXINTH_TTX_INTR14			BIT(6)
+#define BIT_TTXINTH_TTX_INTR13			BIT(5)
+#define BIT_TTXINTH_TTX_INTR12			BIT(4)
+#define BIT_TTXINTH_TTX_INTR11			BIT(3)
+#define BIT_TTXINTH_TTX_INTR10			BIT(2)
+#define BIT_TTXINTH_TTX_INTR9			BIT(1)
+#define BIT_TTXINTH_TTX_INTR8			BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL				0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW		BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC		BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0		0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS				0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS				0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS				0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2				0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL				0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH				0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH				0x0166
+
+/* HSIC TX CRTL, default value: 0x00 */
+#define REG_HTXCTRL				0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP		BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB		BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY		BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1		BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1			BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL				0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH				0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER				0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0		0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC				0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE		BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE			BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13				0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14				0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15				0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50				0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0				0x01ec
+#define REG_FCINTR1				0x01ed
+#define REG_FCINTR2				0x01ee
+#define REG_FCINTR3				0x01ef
+#define REG_FCINTR4				0x01f0
+#define REG_FCINTR5				0x01f1
+#define REG_FCINTR6				0x01f2
+#define REG_FCINTR7				0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL				0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL		0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE		0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL		0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW		BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE		BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1			0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL		0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL		0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN				0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN			BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN				0x0212
+#define BIT_TMDS_CH_EN_CH0_EN			BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN			BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS				0x0215
+#define BIT_BGR_BIAS_BGR_EN			BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D			0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C			0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL			0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N		BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20	BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C		0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL		0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL			0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C	0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL		0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6				0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL			0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8		0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0			0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT	BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT	BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT		BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT		BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT		BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT		BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT		BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT		BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1			0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS	BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS	BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT		BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT		BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT		BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT		BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3			0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE	BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE	BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP	BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI		BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS	BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT			BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT			BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0			0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC	BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE	BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE	BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN	BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE	BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2			0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT		0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n)		((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE		BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI	BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3			0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN	0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER			0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP	0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI	BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI	BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID	BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN	BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN	BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1		0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON		0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9				0x02e0
+#define BIT_INTR9_EDID_ERROR			BIT(6)
+#define BIT_INTR9_EDID_DONE			BIT(5)
+#define BIT_INTR9_DEVCAP_DONE			BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK				0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START			0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START	BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START	BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START	BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START	BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START	BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START	BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0	BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START	BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL				0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID		BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN		BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP	BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO	BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL	BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV		BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN		BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR			0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA			0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON			0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA			0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT			0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA			0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE	BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON	BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN	BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL	BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN	BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL	BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW			0x02f3
+#define REG_TX_IP_BIST_INST_HIGH		0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW			0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH			0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW			0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH		0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL				0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS		BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE		BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE		BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE		BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO		BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO		BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ		BIT(1)
+#define BIT_GENCTL_EMSC_EN			BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT				0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN		BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT		0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL			0x031a
+#define REG_EMSCRFIFOBCNTH			0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT				0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT			0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST		BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST		BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST		BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE	BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR				0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY		BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT		BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR	BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR	BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR	BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE		BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT		BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD			BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK			0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT		0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT			0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1				0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR	BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1			0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0	BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL				0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL		BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL		BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL		0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0				0x0331
+#define BIT_MHL_DP_CTL0_DP_OE			BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR		BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE			0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1				0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL		0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL		0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2				0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN		BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL		0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL		0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL		0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3				0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL		0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL		0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4				0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL		0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL		0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5				0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR		BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN			BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL	0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL		0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL		0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0			0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN		BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO		0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10	0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6	0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4	0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2	0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5	0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3	0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1	0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO		0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X	0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X	0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X	0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X	0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL	BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE		BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2			0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN		BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO		BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK		BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL		0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0			0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE	BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL	0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734	0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747	0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740	0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754	0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL	0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL		0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST	0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK	0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG	0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1			0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL	0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM		0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM		0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM		0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0			0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN		BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL		0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL		0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1			0x0343
+#define BIT_MHL_COC_CTL1_COC_EN			BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL		0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3			0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN		BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4			0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL		0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL		0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5			0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0			0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN		BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM		0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE		0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN		BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6				0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN		BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN		BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN		BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN		BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN	BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL		BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7				0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL	0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL		0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1			0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE	0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL			0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE	0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE		0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS		0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS		0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS	0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS			0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_			0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT	BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN	BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0			0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK			0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0			0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN	BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL	BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR	BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE	BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE	BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER	BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX		BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN		BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1			0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0	0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW		BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR	BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK	BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW	BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL			0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR	BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD	BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K			0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN			0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT			0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT		0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1				0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0			0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0			0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS			0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL				0x03e0
+#define BIT_M3_CTRL_H2M_SWRST			BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL			BIT(3)
+#define BIT_M3_CTRL_M3AV_EN			BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS			BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN		BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+				  | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+				| BIT_M3_CTRL_M3AV_EN \
+				| BIT_M3_CTRL_ENC_TMDS \
+				| BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL				0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN	BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN	BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN		BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED	BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN		BIT(0)
+
+#define REG_M3_POSTM				0x03e2
+#define MSK_M3_POSTM_RRP_DECODE			0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID		0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL				0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH		0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN		BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN			0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN	0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN	0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0			0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0				0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0				0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0			0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0			0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0			0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2				0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED		0x01
+#define VAL_TPI_QUAN_RANGE_FULL			0x02
+#define VAL_TPI_FORMAT_RGB			0x00
+#define VAL_TPI_FORMAT_YCBCR444			0x01
+#define VAL_TPI_FORMAT_YCBCR422			0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB		0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+		(VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT				0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE		BIT(7)
+#define BIT_TPI_INPUT_ENDITHER			BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE		0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT		0x03
+
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT				0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709		BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE	0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT		0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM			0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC				0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG		BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL		BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1		BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN	BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE			BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST		BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW			BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI	BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1			0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT		BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT		BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS	0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL		0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST	0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED	0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP	BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0	BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE		BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1	BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2			0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION	BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD		BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN	BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK	BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD	BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL	BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN				0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0			0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT	BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT		BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT	BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT	BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT	BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS			0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1			0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED		BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT		0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2			0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS		0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE		BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED		BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH		0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3				0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG		BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP		BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE	BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL	0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL			0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN		BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT		BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG	BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL		0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0				0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0				0x0700
+#define REG_COC_STAT_1				0x0701
+#define REG_COC_STAT_2				0x0702
+#define REG_COC_STAT_3				0x0703
+#define REG_COC_STAT_4				0x0704
+#define REG_COC_STAT_5				0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0				0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1				0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6		0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0		0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2				0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6		0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0		0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3				0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7		BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0		0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6				0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7		BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6		BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0		0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7				0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7		BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6		BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5		BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3		0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0		0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9				0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA				0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB				0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC				0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD				0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7		BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0		0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE				0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7		BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0		0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF				0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3		0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0		0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11				0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4		0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0		0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14				0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4		0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0		0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15				0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7		BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4		0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0		0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR				0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK			0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE		BIT(0)
+#define BIT_COC_CALIBRATION_DONE		BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0			0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON		BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17				0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4		0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0		0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18				0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4		0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0		0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19				0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4		0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0		0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A				0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2		0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0		0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8				0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9				0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4				0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM		0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0				0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6				0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7		BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6		BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4		0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0		0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7				0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7		BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6		BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5		BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3		0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0		0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8				0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7		BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4		0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2		0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0		0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9				0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA				0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE				0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7		BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6		BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4		0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0		0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK			0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK			0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK			0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK			0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT			0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT			0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL			0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN		BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN	BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN	BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN	BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE		BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL	BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR	BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT			0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL			0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN		BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN	BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID	BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT	BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL	BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR	BIT(0)
+
+/* MDT Transmit WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT			0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT			0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT	0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT			0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN	BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN	0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0				0x058c
+#define BIT_MDT_RFIFO_DATA_RDY			BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE		BIT(2)
+#define BIT_MDT_XFIFO_EMPTY			BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK			0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1				0x058e
+#define BIT_MDT_RCV_TIMEOUT			BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD		BIT(1)
+#define BIT_MDT_RCV_SM_ERROR			BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT			BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD		BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR			BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK			0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID			0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS				0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT	BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS		BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD		BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE		BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED		BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0				0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK		BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT			BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST		BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG			BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT		BIT(3)
+#define BIT_CBUS_HPD_CHG			BIT(2)
+#define BIT_CBUS_MSC_MT_DONE			BIT(1)
+#define BIT_CBUS_CNX_CHG			BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK			0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1				0x0594
+#define BIT_CBUS_CMD_ABORT			BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD			BIT(3)
+#define BIT_CBUS_DDC_ABORT			BIT(2)
+#define BIT_CBUS_CEC_ABORT			BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK			0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT			0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK			0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT			0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK		0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT			0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK		0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0			0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK		0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8			0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT				0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE		0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE		0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START			0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG		BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST	BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT	BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP	BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG		BIT(1)
+#define BIT_MSC_COMMAND_START_PEER		BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET			0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA		0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA		0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0			0x05bc
+#define REG_MSC_MT_RCVD_DATA1			0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA	0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA	0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL			0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN	BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL		0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN	BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT				0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT		0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL	0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN		BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN		BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1				0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN		BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY			BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT			0x30
+#define MSK_DISC_CTRL1_DISC_CYC			0x0c
+#define BIT_DISC_CTRL1_DISC_EN			BIT(0)
+
+#define VAL_PUP_OFF				0
+#define VAL_PUP_20K				1
+#define VAL_PUP_5K				2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4				0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL		0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL		0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | ((pup_idle) << 4))
+
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5				0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE		BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL		0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8				0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS	BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN	BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9			0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP		BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN		BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT		BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST		BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED	BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS	BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1				0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE		BIT(5)
+#define MSK_DISC_STAT1_DISC_SM			0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2				0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL		BIT(6)
+#define BIT_DISC_STAT2_CBUS_SATUS		BIT(5)
+#define BIT_DISC_STAT2_RSEN			BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN			0x0c
+#define VAL_DISC_STAT2_DEFAULT			0x00
+#define VAL_DISC_STAT2_MHL1_2			0x04
+#define VAL_DISC_STAT2_MHL3			0x08
+#define VAL_DISC_STAT2_RESERVED			0x0c
+
+#define MSK_DISC_STAT2_RGND			0x03
+#define VAL_RGND_OPEN				0x00
+#define VAL_RGND_2K				0x01
+#define VAL_RGND_1K				0x02
+#define VAL_RGND_SHORT				0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0			0x05ed
+#define BIT_RGND_READY_INT			BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT		BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT		BIT(4)
+#define BIT_NOT_MHL_EST_INT			BIT(3)
+#define BIT_MHL_EST_INT				BIT(2)
+#define BIT_MHL3_EST_INT			BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+			    | BIT_CBUS_MHL3_DISCON_INT \
+			    | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK		0x05ee
+
+#endif /* __SIL_SII8620_H__ */
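
The MSK_*/BIT_*/VAL_* definitions above pair each multi-bit register field with a mask and its encoded values. A minimal sketch of decoding one such field, the RGND impedance in REG_DISC_STAT2 (struct sii8620 and sii8620_readb() are hypothetical stand-ins for the driver's real context and register accessor):

	/* Hypothetical helper: detect an MHL sink from the RGND field. */
	static bool sii8620_rgnd_is_mhl(struct sii8620 *ctx)
	{
		u8 stat = sii8620_readb(ctx, REG_DISC_STAT2);

		/* Isolate the 2-bit RGND field; 1 kOhm termination = MHL. */
		return (stat & MSK_DISC_STAT2_RGND) == VAL_RGND_1K;
	}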

+ 3 - 5
drivers/gpu/drm/drm_atomic.c

@@ -705,8 +705,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
 		state->src_w = val;
 	} else if (property == config->prop_src_h) {
 		state->src_h = val;
-	} else if (property == config->rotation_property ||
-		   property == plane->rotation_property) {
+	} else if (property == plane->rotation_property) {
 		if (!is_power_of_2(val & DRM_ROTATE_MASK))
 			return -EINVAL;
 		state->rotation = val;
@@ -766,8 +765,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->src_w;
 	} else if (property == config->prop_src_h) {
 		*val = state->src_h;
-	} else if (property == config->rotation_property ||
-		   property == plane->rotation_property) {
+	} else if (property == plane->rotation_property) {
 		*val = state->rotation;
 	} else if (property == plane->zpos_property) {
 		*val = state->zpos;
@@ -1465,7 +1463,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 
 static struct drm_pending_vblank_event *create_vblank_event(
 		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct dma_fence *fence, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
 	int ret;

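The is_power_of_2() test above captures the rotation encoding: exactly one angle bit must be set in a property value, while reflection bits may be OR'ed in freely. A minimal sketch of the rule in isolation (the helper is illustrative, not part of the patch):

	#include <linux/log2.h>

	/* Illustrative only: mirrors the check in drm_atomic_plane_set_property(). */
	static bool rotation_value_valid(u64 val)
	{
		/* One of DRM_ROTATE_0/90/180/270, and only one, must be set;
		 * DRM_REFLECT_X/Y may accompany it in any combination. */
		return is_power_of_2(val & DRM_ROTATE_MASK);
	}

For example, DRM_ROTATE_90 | DRM_REFLECT_X passes, while DRM_ROTATE_90 | DRM_ROTATE_180 is rejected with -EINVAL.
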
+ 4 - 4
drivers/gpu/drm/drm_atomic_helper.c

@@ -30,7 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "drm_crtc_internal.h"
 
@@ -1017,7 +1017,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_swap_state() so it uses the current plane state (and
  * just uses the atomic state to find the changed planes)
  *
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero on success or < 0 if dma_fence_wait() fails.
  */
 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 				      struct drm_atomic_state *state,
@@ -1041,11 +1041,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 		 * still interrupt the operation. Instead of blocking until the
 		 * timer expires, make the wait interruptible.
 		 */
-		ret = fence_wait(plane_state->fence, pre_swap);
+		ret = dma_fence_wait(plane_state->fence, pre_swap);
 		if (ret)
 			return ret;
 
-		fence_put(plane_state->fence);
+		dma_fence_put(plane_state->fence);
 		plane_state->fence = NULL;
 	}
 

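The loop above is the only point where the atomic path blocks on fences. The underlying idiom carries over unchanged apart from the rename: wait interruptibly, then drop the reference. A minimal sketch (error handling trimmed; wait_and_release() is illustrative, not a kernel API):

	#include <linux/dma-fence.h>

	static int wait_and_release(struct dma_fence *fence)
	{
		/* Returns 0 once signalled, or a negative error such as
		 * -ERESTARTSYS when the wait is interrupted. */
		long ret = dma_fence_wait(fence, true);

		if (ret)
			return ret;

		dma_fence_put(fence);
		return 0;
	}
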
+ 4 - 28
drivers/gpu/drm/drm_blend.c

@@ -89,7 +89,7 @@
 * On top of this basic transformation additional properties can be exposed by
 * the driver:
 *
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
 *   rotation and reflection step between the source and destination rectangles.
 *   Without this property the rectangle is only scaled, but not rotated or
 *   reflected.
@@ -105,18 +105,12 @@
 */
 
 /**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
 * @supported_rotations: bitmask of supported rotations and reflections
 *
 * This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
 *
 * Since a rotation by 180° is the same as reflecting both along the x
 * and the y axis the rotation property is somewhat redundant. Drivers can use
@@ -144,24 +138,6 @@
 * rotation. After reflection, the rotation is applied to the image sampled from
 * the source rectangle, before scaling it to fit the destination rectangle.
 */
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-						       unsigned int supported_rotations)
-{
-	static const struct drm_prop_enum_list props[] = {
-		{ __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
-		{ __builtin_ffs(DRM_ROTATE_90) - 1,  "rotate-90" },
-		{ __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
-		{ __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
-		{ __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
-		{ __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
-	};
-
-	return drm_property_create_bitmask(dev, 0, "rotation",
-					   props, ARRAY_SIZE(props),
-					   supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
 int drm_plane_create_rotation_property(struct drm_plane *plane,
 				       unsigned int rotation,
 				       unsigned int supported_rotations)

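With drm_mode_create_rotation_property() gone, the property is created per plane. A minimal sketch of the call as a driver's plane-init code might make it (the supported mask here is illustrative):

	/* Expose 0/180 degree rotation plus both reflections on this plane,
	 * defaulting to the unrotated state. */
	drm_plane_create_rotation_property(plane,
					   DRM_ROTATE_0,
					   DRM_ROTATE_0 |
					   DRM_ROTATE_180 |
					   DRM_REFLECT_X |
					   DRM_REFLECT_Y);

The mdp5 conversion further down in this series takes exactly this shape.
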
+ 18 - 0
drivers/gpu/drm/drm_dp_dual_mode_helper.c

@@ -142,6 +142,11 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
 		      sizeof(dp_dual_mode_hdmi_id)) == 0;
 }
 
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+	return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
 static bool is_type2_adaptor(uint8_t adaptor_id)
 {
 	return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
@@ -193,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
 				    hdmi_id, sizeof(hdmi_id));
+	DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+		      ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
 	if (ret)
 		return DRM_DP_DUAL_MODE_UNKNOWN;
 
@@ -210,6 +217,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 	 */
 	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
 				    &adaptor_id, sizeof(adaptor_id));
+	DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+		      adaptor_id, ret);
 	if (ret == 0) {
 		if (is_lspcon_adaptor(hdmi_id, adaptor_id))
 			return DRM_DP_DUAL_MODE_LSPCON;
@@ -219,6 +228,15 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
 			else
 				return DRM_DP_DUAL_MODE_TYPE2_DVI;
 		}
+		/*
+		 * If it's neither a proper type 1 ID nor a broken type 1
+		 * adaptor as described above, assume type 1, but let the
+		 * user know that we may have misdetected the type.
+		 */
+		if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+			DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+				  adaptor_id);
+
 	}
 
 	if (is_hdmi_adaptor(hdmi_id))

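For callers nothing changes besides the extra logging: detection is still a single probe over the adaptor's DDC/I2C bus. A minimal usage sketch (the case labels assume the enum in drm_dp_dual_mode_helper.h):

	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);

	switch (type) {
	case DRM_DP_DUAL_MODE_LSPCON:
		/* LSPCON adaptors need dedicated mode programming. */
		break;
	case DRM_DP_DUAL_MODE_TYPE2_HDMI:
	case DRM_DP_DUAL_MODE_TYPE2_DVI:
		/* Type 2 adaptors expose a controllable TMDS output buffer. */
		break;
	default:
		/* Type 1 (or unknown): fixed function, nothing to configure. */
		break;
	}
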
+ 56 - 23
drivers/gpu/drm/drm_edid.c

@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
 	return ret == xfers ? 0 : -1;
 }
 
+static void connector_bad_edid(struct drm_connector *connector,
+			       u8 *edid, int num_blocks)
+{
+	int i;
+
+	if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+		return;
+
+	dev_warn(connector->dev->dev,
+		 "%s: EDID is invalid:\n",
+		 connector->name);
+	for (i = 0; i < num_blocks; i++) {
+		u8 *block = edid + i * EDID_LENGTH;
+		char prefix[20];
+
+		if (drm_edid_is_zero(block, EDID_LENGTH))
+			sprintf(prefix, "\t[%02x] ZERO ", i);
+		else if (!drm_edid_block_valid(block, i, false, NULL))
+			sprintf(prefix, "\t[%02x] BAD  ", i);
+		else
+			sprintf(prefix, "\t[%02x] GOOD ", i);
+
+		print_hex_dump(KERN_WARNING,
+			       prefix, DUMP_PREFIX_NONE, 16, 1,
+			       block, EDID_LENGTH, false);
+	}
+}
+
 /**
 * drm_do_get_edid - get EDID data using a custom EDID block read function
 * @connector: connector we're probing
@@ -1283,7 +1311,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 {
 	int i, j = 0, valid_extensions = 0;
 	u8 *edid, *new;
-	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
 	if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
@@ -1292,7 +1319,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 	for (i = 0; i < 4; i++) {
 		if (get_edid_block(data, edid, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(edid, 0, print_bad_edid,
+		if (drm_edid_block_valid(edid, 0, false,
 					 &connector->edid_corrupt))
 			break;
 		if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
@@ -1304,54 +1331,60 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 		goto carp;
 
 	/* if there's no extensions, we're done */
-	if (edid[0x7e] == 0)
+	valid_extensions = edid[0x7e];
+	if (valid_extensions == 0)
 		return (struct edid *)edid;
 
-	new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+	new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
 	if (!new)
 		goto out;
 	edid = new;
 
 	for (j = 1; j <= edid[0x7e]; j++) {
-		u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH;
+		u8 *block = edid + j * EDID_LENGTH;
 
 		for (i = 0; i < 4; i++) {
 			if (get_edid_block(data, block, j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block, j,
-						 print_bad_edid, NULL)) {
-				valid_extensions++;
+			if (drm_edid_block_valid(block, j, false, NULL))
 				break;
-			}
 		}
 
-		if (i == 4 && print_bad_edid) {
-			dev_warn(connector->dev->dev,
-			 "%s: Ignoring invalid EDID block %d.\n",
-			 connector->name, j);
-
-			connector->bad_edid_counter++;
-		}
+		if (i == 4)
+			valid_extensions--;
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *base;
+
+		connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		edid[0x7e] = valid_extensions;
-		new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+
+		new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
 		if (!new)
 			goto out;
+
+		base = new;
+		for (i = 0; i <= edid[0x7e]; i++) {
+			u8 *block = edid + i * EDID_LENGTH;
+
+			if (!drm_edid_block_valid(block, i, false, NULL))
+				continue;
+
+			memcpy(base, block, EDID_LENGTH);
+			base += EDID_LENGTH;
+		}
+
+		kfree(edid);
 		edid = new;
 	}
 
 	return (struct edid *)edid;
 
 carp:
-	if (print_bad_edid) {
-		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
-			 connector->name, j);
-	}
-	connector->bad_edid_counter++;
-
+	connector_bad_edid(connector, edid, 1);
 out:
 	kfree(edid);
 	return NULL;

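drm_do_get_edid() leaves the transport to the caller via the get_edid_block() callback, so the rewritten validation above applies to any bus. A minimal sketch of such a callback (my_device and my_read_edid() are hypothetical placeholders for a driver's private context and transfer routine):

	static int my_get_edid_block(void *data, u8 *buf, unsigned int block,
				     size_t len)
	{
		struct my_device *mydev = data;	/* hypothetical context */

		/* Read 'len' bytes of EDID block 'block' into 'buf';
		 * return 0 on success, non-zero on failure. */
		return my_read_edid(mydev, buf, block * EDID_LENGTH, len);
	}

	/* edid = drm_do_get_edid(connector, my_get_edid_block, mydev); */
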
+ 1 - 6
drivers/gpu/drm/drm_fb_helper.c

@@ -392,15 +392,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);
 
-		if (plane->rotation_property) {
+		if (plane->rotation_property)
 			drm_mode_plane_set_obj_prop(plane,
 						    plane->rotation_property,
 						    DRM_ROTATE_0);
-		} else if (dev->mode_config.rotation_property) {
-			drm_mode_plane_set_obj_prop(plane,
-						    dev->mode_config.rotation_property,
-						    DRM_ROTATE_0);
-		}
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {

+ 3 - 3
drivers/gpu/drm/drm_fops.c

@@ -665,7 +665,7 @@ void drm_event_cancel_free(struct drm_device *dev,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	if (p->fence)
-		fence_put(p->fence);
+		dma_fence_put(p->fence);
 
 	kfree(p);
 }
@@ -696,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 	}
 
 	if (e->fence) {
-		fence_signal(e->fence);
-		fence_put(e->fence);
+		dma_fence_signal(e->fence);
+		dma_fence_put(e->fence);
 	}
 
 	if (!e->file_priv) {

+ 26 - 2
drivers/gpu/drm/drm_of.c

@@ -6,6 +6,11 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_of.h>
 
+static void drm_release_of(struct device *dev, void *data)
+{
+	of_node_put(data);
+}
+
 /**
 * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
 * @dev: DRM device
@@ -63,6 +68,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_of_find_possible_crtcs);
 
+/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: of_node
+ */
+void drm_of_component_match_add(struct device *master,
+				struct component_match **matchptr,
+				int (*compare)(struct device *, void *),
+				struct device_node *node)
+{
+	of_node_get(node);
+	component_match_add_release(master, matchptr, drm_release_of,
+				    compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
 /**
 * drm_of_component_probe - Generic probe function for a component based master
 * @dev: master device containing the OF node
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
 			continue;
 		}
 
-		component_match_add(dev, &match, compare_of, port);
+		drm_of_component_match_add(dev, &match, compare_of, port);
 		of_node_put(port);
 	}
 
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
 				continue;
 			}
 
-			component_match_add(dev, &match, compare_of, remote);
+			drm_of_component_match_add(dev, &match, compare_of,
						   remote);
 			of_node_put(remote);
 		}
 		of_node_put(port);

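The wrapper pins each device_node for as long as the match table holds it; the component core drops that reference through drm_release_of(). A minimal probe-side sketch of the pattern the conversions below all follow (compare_of() and my_master_ops are placeholders for the driver's usual comparator and component_master_ops):

	struct component_match *match = NULL;

	/* Takes its own of_node reference; released via drm_release_of(). */
	drm_of_component_match_add(dev, &match, compare_of, node);
	of_node_put(node);	/* drop the probe-time reference only */

	return component_master_add_with_match(dev, &my_master_ops, match);
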
+ 3 - 2
drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -16,6 +16,7 @@
 
 #include <linux/component.h>
 #include <linux/of_platform.h>
+#include <drm/drm_of.h>
 
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 			if (!core_node)
 				break;
 
-			component_match_add(&pdev->dev, &match, compare_of,
-					    core_node);
+			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
 			of_node_put(core_node);
 		}
 	} else if (dev->platform_data) {

+ 3 - 3
drivers/gpu/drm/etnaviv/etnaviv_gem.c

@@ -466,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 	const char *type, struct seq_file *m)
 {
-	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		seq_printf(m, "\t%9s: %s %s seq %u\n",
 			   type,
 			   fence->ops->get_driver_name(fence),
@@ -482,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",

+ 23 - 23
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -15,7 +15,7 @@
 */
 
 #include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
 #include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
 	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
 		if (!gpu->event[i].used)
 			continue;
-		fence_signal(gpu->event[i].fence);
+		dma_fence_signal(gpu->event[i].fence);
 		gpu->event[i].fence = NULL;
 		gpu->event[i].used = false;
 		complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct etnaviv_fence, base);
 }
 
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "etnaviv";
 }
 
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return dev_name(f->gpu->dev);
 }
 
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	return fence_completed(f->gpu, f->base.seqno);
 }
 
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
 	.get_driver_name = etnaviv_fence_get_driver_name,
 	.get_timeline_name = etnaviv_fence_get_timeline_name,
 	.enable_signaling = etnaviv_fence_enable_signaling,
 	.signaled = etnaviv_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = etnaviv_fence_release,
 };
 
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 
 	f->gpu = gpu;
 
-	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
-		   gpu->fence_context, ++gpu->next_fence);
+	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+		       gpu->fence_context, ++gpu->next_fence);
 
 	return &f->base;
 }
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 		/* Wait on any existing exclusive fence which isn't our own */
 		fence = reservation_object_get_excl(robj);
 		if (fence && fence->context != context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
 		if (fence->context != context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
 
 	mutex_lock(&gpu->lock);
 	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
-		if (!fence_is_signaled(cmdbuf->fence))
+		if (!dma_fence_is_signaled(cmdbuf->fence))
 			break;
 
 		list_del(&cmdbuf->node);
-		fence_put(cmdbuf->fence);
+		dma_fence_put(cmdbuf->fence);
 
 		for (i = 0; i < cmdbuf->nr_bos; i++) {
 			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 	unsigned int event, i;
 	int ret;
 
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 		}
 
 		while ((event = ffs(intr)) != 0) {
-			struct fence *fence;
+			struct dma_fence *fence;
 
 			event -= 1;
 
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
 			fence = gpu->event[event].fence;
 			gpu->event[event].fence = NULL;
-			fence_signal(fence);
+			dma_fence_signal(fence);
 
 			/*
 			 * Events can be processed out of order.  Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 		return ret;
 
 	gpu->drm = drm;
-	gpu->fence_context = fence_context_alloc(1);
+	gpu->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_LIST_HEAD(&gpu->active_cmd_list);

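etnaviv's conversion shows the whole renamed surface in one place: the ops table, context allocation, init, and signalling. A minimal, driver-agnostic sketch of that lifecycle (my_fence_ops stands in for a table like etnaviv_fence_ops above; allocation failure handling elided):

	spinlock_t lock;
	u64 context;
	unsigned int seqno = 0;
	struct dma_fence *f;

	spin_lock_init(&lock);
	context = dma_fence_context_alloc(1);	/* one timeline */

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	dma_fence_init(f, &my_fence_ops, &lock, context, ++seqno);

	/* ... publish to consumers, who take dma_fence_get() references ... */

	dma_fence_signal(f);	/* the work completed */
	dma_fence_put(f);	/* drop the creator's reference */
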
+ 2 - 2
drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
 
 struct etnaviv_event {
 	bool used;
-	struct fence *fence;
+	struct dma_fence *fence;
 };
 
 struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
 	/* vram node used if the cmdbuf is mapped through the MMUv2 */
 	struct drm_mm_node vram_node;
 	/* fence after which this buffer is to be disposed */
-	struct fence *fence;
+	struct dma_fence *fence;
 	/* target exec state */
 	u32 exec_state;
 	/* per GPU in-flight list */

+ 4 - 3
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c

@@ -24,6 +24,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
 
 #include "kirin_drm_drv.h"
 
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
 		DRM_ERROR("no valid endpoint node\n");
 		return ERR_PTR(-ENODEV);
 	}
-	of_node_put(endpoint);
 
 	remote = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
 	if (!remote) {
 		DRM_ERROR("no valid remote node\n");
 		return ERR_PTR(-ENODEV);
 	}
-	of_node_put(remote);
 
 	if (!of_device_is_available(remote)) {
 		DRM_ERROR("not available for remote node\n");
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(remote))
 		return PTR_ERR(remote);
 
-	component_match_add(dev, &match, compare_of, remote);
+	drm_of_component_match_add(dev, &match, compare_of, remote);
+	of_node_put(remote);
 
 	return component_master_add_with_match(dev, &kirin_drm_ops, match);
 

+ 2 - 1
drivers/gpu/drm/i2c/tda998x_drv.c

@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
 	mutex_unlock(&priv->audio_mutex);
 }
 
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
 	struct tda998x_priv *priv = dev_get_drvdata(dev);
 

+ 16 - 16
drivers/gpu/drm/i915/i915_gem_request.c

@@ -26,12 +26,12 @@
 
 #include "i915_drv.h"
 
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "i915";
 }
 
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
 	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
 	return "global";
 }
 
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
 {
 	return i915_gem_request_completed(to_request(fence));
 }
 
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
 	if (i915_fence_signaled(fence))
 		return false;
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence)
 	return true;
 }
 
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
 {
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence,
 	return timeout_jiffies;
 }
 
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
 	snprintf(str, size, "%u", fence->seqno);
 }
 
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
					  int size)
 {
 	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
 }
 
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
 {
 	struct drm_i915_gem_request *req = to_request(fence);
 
 	kmem_cache_free(req->i915->requests, req);
 }
 
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
 	.get_driver_name = i915_fence_get_driver_name,
 	.get_timeline_name = i915_fence_get_timeline_name,
 	.enable_signaling = i915_fence_enable_signaling,
@@ -388,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
-	 * with fence_init(). This increment is safe for release as we check
-	 * that the request we have a reference to and matches the active
+	 * with dma_fence_init(). This increment is safe for release as we
+	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
@@ -412,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
		goto err;
 
 	spin_lock_init(&req->lock);
-	fence_init(&req->fence,
-		   &i915_fence_ops,
-		   &req->lock,
-		   engine->fence_context,
-		   seqno);
+	dma_fence_init(&req->fence,
+		       &i915_fence_ops,
+		       &req->lock,
+		       engine->fence_context,
+		       seqno);
 
 	i915_sw_fence_init(&req->submit, submit_notify);
 

+ 9 - 9
drivers/gpu/drm/i915/i915_gem_request.h

@@ -25,7 +25,7 @@
 #ifndef I915_GEM_REQUEST_H
 #define I915_GEM_REQUEST_H
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
@@ -62,7 +62,7 @@ struct intel_signal_node {
 * The requests are reference counted.
 */
 struct drm_i915_gem_request {
-	struct fence fence;
+	struct dma_fence fence;
 	spinlock_t lock;
 
 	/** On Which ring this request was generated */
@@ -145,9 +145,9 @@ struct drm_i915_gem_request {
 	struct list_head execlist_link;
 };
 
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
 
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool fence_is_i915(struct dma_fence *fence)
 {
 	return fence->ops == &i915_fence_ops;
 }
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 }
 
 static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
 {
 	/* We assume that NULL fence/request are interoperable */
 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
@@ -183,19 +183,19 @@ to_request(struct fence *fence)
 static inline struct drm_i915_gem_request *
 i915_gem_request_get(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get(&req->fence));
+	return to_request(dma_fence_get(&req->fence));
 }
 
 static inline struct drm_i915_gem_request *
 i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
 {
-	return to_request(fence_get_rcu(&req->fence));
+	return to_request(dma_fence_get_rcu(&req->fence));
 }
 
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
-	fence_put(&req->fence);
+	dma_fence_put(&req->fence);
 }
 
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
		 * compiler.
		 *
		 * The atomic operation at the heart of
-		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted

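The BUILD_BUG_ON() in to_request() pins down the layout trick: with the dma_fence as the first member, a fence pointer and a request pointer share the same address, so NULL maps to NULL and the cast is free. The general pattern, sketched with placeholder names:

	struct my_request {
		struct dma_fence fence;	/* must remain the first member */
		/* ... driver-private state ... */
	};

	static inline struct my_request *to_my_request(struct dma_fence *fence)
	{
		/* Zero-offset container_of(): compiles to a plain cast. */
		return container_of(fence, struct my_request, fence);
	}
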
+ 21 - 20
drivers/gpu/drm/i915/i915_sw_fence.c

@@ -8,7 +8,7 @@
 */
 
 #include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <linux/reservation.h>
 
 #include "i915_sw_fence.h"
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 	return pending;
 }
 
-struct dma_fence_cb {
-	struct fence_cb base;
+struct i915_sw_dma_fence_cb {
+	struct dma_fence_cb base;
 	struct i915_sw_fence *fence;
-	struct fence *dma;
+	struct dma_fence *dma;
 	struct timer_list timer;
 };
 
 static void timer_i915_sw_fence_wake(unsigned long data)
 {
-	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
 
 	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
	       cb->dma->ops->get_driver_name(cb->dma),
	       cb->dma->ops->get_timeline_name(cb->dma),
	       cb->dma->seqno);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 	cb->dma = NULL;
 
 	i915_sw_fence_commit(cb->fence);
 	cb->timer.function = NULL;
 }
 
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+				   struct dma_fence_cb *data)
 {
-	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
 
 	del_timer_sync(&cb->timer);
 	if (cb->timer.function)
		i915_sw_fence_commit(cb->fence);
-	fence_put(cb->dma);
+	dma_fence_put(cb->dma);
 
 	kfree(cb);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
 {
-	struct dma_fence_cb *cb;
+	struct i915_sw_dma_fence_cb *cb;
 	int ret;
 
-	if (fence_is_signaled(dma))
+	if (dma_fence_is_signaled(dma))
		return 0;
 
 	cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;
 
-		return fence_wait(dma, false);
+		return dma_fence_wait(dma, false);
 	}
 
 	cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
		      timer_i915_sw_fence_wake, (unsigned long)cb,
		      TIMER_IRQSAFE);
 	if (timeout) {
-		cb->dma = fence_get(dma);
+		cb->dma = dma_fence_get(dma);
		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
 	}
 
-	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+	ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
 	if (ret == 0) {
		ret = 1;
 	} else {
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
 {
-	struct fence *excl;
+	struct dma_fence *excl;
 	int ret = 0, pending;
 
 	if (write) {
-		struct fence **shared;
+		struct dma_fence **shared;
		unsigned int count, i;
 
		ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
		}
 
		for (i = 0; i < count; i++)
-			fence_put(shared[i]);
+			dma_fence_put(shared[i]);
		kfree(shared);
 	} else {
		excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
			ret |= pending;
 	}
 
-	fence_put(excl);
+	dma_fence_put(excl);
 
 	return ret;
 }

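The ret == 0 branch above leans on dma_fence_add_callback()'s contract: 0 means the callback is armed for the signal, while an already-signalled fence returns -ENOENT without ever invoking it. A minimal sketch of the usual caller-side split (my_fence_wake() is a placeholder callback):

	ret = dma_fence_add_callback(fence, &cb->base, my_fence_wake);
	if (ret == 0) {
		/* Armed: my_fence_wake() runs when the fence signals. */
	} else if (ret == -ENOENT) {
		/* Already signalled: run the completion path directly. */
		my_fence_wake(fence, &cb->base);
	}
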
+ 4 - 4
drivers/gpu/drm/i915/i915_sw_fence.h

@@ -16,8 +16,8 @@
 #include <linux/wait.h>
 
 struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
 struct reservation_object;
 
 struct i915_sw_fence {
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *after,
				 wait_queue_t *wq);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
-				  struct fence *dma,
+				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp);
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct reservation_object *resv,
-				    const struct fence_ops *exclude,
+				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp);

+ 1 - 1
drivers/gpu/drm/i915/i915_trace.h

@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
			   __entry->ring = req->engine->id;
			   __entry->seqno = req->fence.seqno;
			   __entry->flags = flags;
-			   fence_enable_sw_signaling(&req->fence);
+			   dma_fence_enable_sw_signaling(&req->fence);
			   ),
 
	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",

+ 2 - 2
drivers/gpu/drm/i915/intel_breadcrumbs.c

@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg)
						 &request->signaling.wait);
 
			local_bh_disable();
-			fence_signal(&request->fence);
+			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */
 
			/* Find the next oldest signal. Note that as we have
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	struct rb_node *parent, **p;
 	bool first, wakeup;
 
-	/* locked by fence_enable_sw_signaling() */
+	/* locked by dma_fence_enable_sw_signaling() */
 	assert_spin_locked(&request->lock);
 
 	request->signaling.wait.tsk = b->signaler;

+ 4 - 7
drivers/gpu/drm/i915/intel_dp.c

@@ -1459,8 +1459,7 @@ static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
 	if ((drm_debug & DRM_UT_KMS) == 0)
		return;
 
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;
 
 	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
@@ -1478,8 +1477,7 @@ static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
 	if ((drm_debug & DRM_UT_KMS) == 0)
		return;
 
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;
 
 	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
@@ -3615,8 +3613,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (!is_edp(intel_dp) && !intel_dp->sink_count)
		return false;
 
-	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-	      DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */
 
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -4134,7 +4131,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
		return connector_status_connected;
 
 	/* if there's no downstream port, we're done */
-	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;
 
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */

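All four conversions above collapse the same two-line DPCD test, so the new helper amounts to the sketch below (reconstructed from the call sites; the canonical definition lives in the DP helper header):

	static inline bool
	drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		/* A set DP_DWN_STRM_PORT_PRESENT bit marks a branch device
		 * (adaptor/repeater); clear means a native DP sink. */
		return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
	}
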
+ 1 - 1
drivers/gpu/drm/i915/intel_engine_cs.c

@@ -245,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	spin_lock_init(&engine->execlist_lock);
 
-	engine->fence_context = fence_context_alloc(1);
+	engine->fence_context = dma_fence_context_alloc(1);
 
 	intel_engine_init_requests(engine);
 	intel_engine_init_hangcheck(engine);

+ 3 - 1
drivers/gpu/drm/mediatek/mtk_drm_drv.c

@@ -18,6 +18,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
 #include <linux/component.h>
 #include <linux/iommu.h>
 #include <linux/of_address.h>
@@ -416,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
		    comp_type == MTK_DPI) {
			dev_info(dev, "Adding component match for %s\n",
				 node->full_name);
-			component_match_add(dev, &match, compare_of, node);
+			drm_of_component_match_add(dev, &match, compare_of,
						   node);
		} else {
			struct mtk_ddp_comp *comp;
 

+ 0 - 3
drivers/gpu/drm/msm/adreno/adreno_device.c

@@ -25,9 +25,6 @@ bool hang_debug = false;
 MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
 module_param_named(hang_debug, hang_debug, bool, 0600);
 
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
 static const struct adreno_info gpulist[] = {
 	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),

+ 3 - 0
drivers/gpu/drm/msm/adreno/adreno_gpu.h

@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
		gpu_write(&gpu->base, reg - 1, data);
 }
 
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+
 #endif /* __ADRENO_GPU_H__ */

+ 22 - 13
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c

@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
 		!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
 		return;
 
-	if (!dev->mode_config.rotation_property)
-		dev->mode_config.rotation_property =
-			drm_mode_create_rotation_property(dev,
-				DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
-	if (dev->mode_config.rotation_property)
-		drm_object_attach_property(&plane->base,
-			dev->mode_config.rotation_property,
-			DRM_ROTATE_0);
+	drm_plane_create_rotation_property(plane,
+					   DRM_ROTATE_0,
+					   DRM_ROTATE_0 |
+					   DRM_ROTATE_180 |
+					   DRM_REFLECT_X |
+					   DRM_REFLECT_Y);
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 			plane_enabled(old_state), plane_enabled(state));
 
 	if (plane_enabled(state)) {
+		unsigned int rotation;
+
 		format = to_mdp_format(msm_framebuffer_format(state->fb));
 		if (MDP_FORMAT_IS_YUV(format) &&
 			!pipe_supports_yuv(mdp5_plane->caps)) {
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 			return -EINVAL;
 		}
 
-		hflip = !!(state->rotation & DRM_REFLECT_X);
-		vflip = !!(state->rotation & DRM_REFLECT_Y);
+		rotation = drm_rotation_simplify(state->rotation,
+						 DRM_ROTATE_0 |
+						 DRM_REFLECT_X |
+						 DRM_REFLECT_Y);
+		hflip = !!(rotation & DRM_REFLECT_X);
+		vflip = !!(rotation & DRM_REFLECT_Y);
+
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
 			dev_err(plane->dev->dev,
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
+	unsigned int rotation;
 	bool vflip, hflip;
 	unsigned long flags;
 	int ret;
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	config |= get_scale_config(format, src_h, crtc_h, false);
 	DBG("scale config = %x", config);
 
-	hflip = !!(pstate->rotation & DRM_REFLECT_X);
-	vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+	rotation = drm_rotation_simplify(pstate->rotation,
+					 DRM_ROTATE_0 |
+					 DRM_REFLECT_X |
+					 DRM_REFLECT_Y);
+	hflip = !!(rotation & DRM_REFLECT_X);
+	vflip = !!(rotation & DRM_REFLECT_Y);
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 

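Two helpers carry this conversion. drm_plane_create_rotation_property() attaches a per-plane property (replacing the global mode_config.rotation_property this pull removes), and drm_rotation_simplify() folds an unsupported rotation into an equivalent supported one; in particular DRM_ROTATE_180 is equivalent to DRM_REFLECT_X | DRM_REFLECT_Y, which is exactly what lets mdp5 advertise 180-degree rotation on hardware that only has flip bits. A sketch of the consuming pattern (example_* name is illustrative; header location per the 4.9-era tree):

#include <drm/drm_blend.h>

/* Map an atomic rotation value onto hw h/v flip bits, letting the
 * helper rewrite DRM_ROTATE_180 as the two reflections. */
static void example_rotation_to_flips(unsigned int requested,
				      bool *hflip, bool *vflip)
{
	unsigned int rotation = drm_rotation_simplify(requested,
						      DRM_ROTATE_0 |
						      DRM_REFLECT_X |
						      DRM_REFLECT_Y);

	*hflip = !!(rotation & DRM_REFLECT_X);
	*vflip = !!(rotation & DRM_REFLECT_Y);
}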
+ 1 - 0
drivers/gpu/drm/msm/msm_debugfs.c

@@ -18,6 +18,7 @@
 #ifdef CONFIG_DEBUG_FS
 #include "msm_drv.h"
 #include "msm_gpu.h"
+#include "msm_debugfs.h"
 
 static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
 {

+ 7 - 5
drivers/gpu/drm/msm/msm_drv.c

@@ -15,6 +15,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <drm/drm_of.h>
+
 #include "msm_drv.h"
 #include "msm_debugfs.h"
 #include "msm_fence.h"
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev,
 			continue;
 		}
 
-		component_match_add(master_dev, matchptr, compare_of, intf);
-
+		drm_of_component_match_add(master_dev, matchptr, compare_of,
+					   intf);
 		of_node_put(intf);
 		of_node_put(ep_node);
 	}
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev,
 		put_device(mdp_dev);
 
 		/* add the MDP component itself */
-		component_match_add(dev, matchptr, compare_of,
-				    mdp_dev->of_node);
+		drm_of_component_match_add(dev, matchptr, compare_of,
+					   mdp_dev->of_node);
 	} else {
 		/* MDP4 */
 		mdp_dev = dev;
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	component_match_add(dev, matchptr, compare_of, np);
+	drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 

+ 1 - 1
drivers/gpu/drm/msm/msm_drv.h

@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);

+ 14 - 14
drivers/gpu/drm/msm/msm_fence.c

@@ -15,7 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
 
 	fctx->dev = dev;
 	fctx->name = name;
-	fctx->context = fence_context_alloc(1);
+	fctx->context = dma_fence_context_alloc(1);
 	init_waitqueue_head(&fctx->event);
 	spin_lock_init(&fctx->spinlock);
 
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 
 struct msm_fence {
 	struct msm_fence_context *fctx;
-	struct fence base;
+	struct dma_fence base;
 };
 
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct msm_fence, base);
 }
 
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "msm";
 }
 
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return f->fctx->name;
 }
 
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
 {
 	return true;
 }
 
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
 {
 	struct msm_fence *f = to_msm_fence(fence);
 	kfree_rcu(f, base.rcu);
 }
 
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = msm_fence_release,
 };
 
-struct fence *
+struct dma_fence *
 msm_fence_alloc(struct msm_fence_context *fctx)
 {
 	struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
 
 	f->fctx = fctx;
 
-	fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
-			fctx->context, ++fctx->last_fence);
+	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+		       fctx->context, ++fctx->last_fence);
 
 	return &f->base;
 }

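msm is the template for the rest of the sweep: the rename is purely mechanical (struct fence -> struct dma_fence, fence_ops -> dma_fence_ops, fence_init/fence_wait/fence_put and friends gaining the dma_ prefix, linux/fence.h -> linux/dma-fence.h), with no behavioural change. The other half of a fence's life, signaling, keeps the same shape under the new names; a minimal sketch of a hypothetical completion path (not from this diff):

#include <linux/dma-fence.h>

/* Hypothetical retire path: mark the fence signaled, which wakes
 * waiters and runs callbacks added via dma_fence_add_callback(),
 * then drop the submission's reference. */
static void example_retire_fence(struct dma_fence *fence)
{
	dma_fence_signal(fence);
	dma_fence_put(fence);
}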
+ 1 - 1
drivers/gpu/drm/msm/msm_fence.h

@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence);
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 
 
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
 
 
 #endif
 #endif

+ 7 - 7
drivers/gpu/drm/msm/msm_gem.c

@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = reservation_object_get_excl(msm_obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(msm_obj->resv));
 		if (fence->context != fctx->context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
-	if (!fence_is_signaled(fence))
+	if (!dma_fence_is_signaled(fence))
 		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 

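Worth noting in the hunks above: msm only waits on fences from foreign contexts, since fences on its own ring retire in FIFO order. That pattern survives the rename unchanged; a condensed sketch (example_* name is illustrative):

#include <linux/dma-fence.h>

/* Wait only on fences from other timelines; our own retire in order. */
static long example_sync_foreign(struct dma_fence *fence, u64 our_context)
{
	if (!fence || fence->context == our_context)
		return 0;

	return dma_fence_wait(fence, true);	/* interruptible */
}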
+ 1 - 1
drivers/gpu/drm/msm/msm_gem.h

@@ -104,7 +104,7 @@ struct msm_gem_submit {
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct pid *pid;    /* submitting process */
 	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;

+ 4 - 4
drivers/gpu/drm/msm/msm_gem_submit.c

@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
 void msm_gem_submit_free(struct msm_gem_submit *submit)
 {
-	fence_put(submit->fence);
+	dma_fence_put(submit->fence);
 	list_del(&submit->node);
 	put_pid(submit->pid);
 	kfree(submit);
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
 	struct msm_gpu *gpu = priv->gpu;
-	struct fence *in_fence = NULL;
+	struct dma_fence *in_fence = NULL;
 	struct sync_file *sync_file = NULL;
 	int out_fence_fd = -1;
 	unsigned i;
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		 */
 
 		if (in_fence->context != gpu->fctx->context) {
-			ret = fence_wait(in_fence, true);
+			ret = dma_fence_wait(in_fence, true);
 			if (ret)
 				goto out;
 		}
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
 out:
 	if (in_fence)
-		fence_put(in_fence);
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 	if (ret)
 		msm_gem_submit_free(submit);

+ 1 - 1
drivers/gpu/drm/msm/msm_gpu.c

@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu)
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (fence_is_signaled(submit->fence)) {
+		if (dma_fence_is_signaled(submit->fence)) {
 			retire_submit(gpu, submit);
 		} else {
 			break;

+ 3 - 3
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct fence *fence)
+			struct dma_fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		tile->fence = (struct nouveau_fence *)fence_get(fence);
+		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct fence *fence = reservation_object_get_excl(bo->resv);
+	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;

+ 40 - 40
drivers/gpu/drm/nouveau/nouveau_fence.c

@@ -28,7 +28,7 @@
 
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
 
 #include <nvif/cl826e.h>
 #include <nvif/notify.h>
@@ -38,11 +38,11 @@
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
 
 static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
 {
 	return container_of(fence, struct nouveau_fence, base);
 }
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
 {
 	int drop = 0;
 
-	fence_signal_locked(&fence->base);
+	dma_fence_signal_locked(&fence->base);
 	list_del(&fence->head);
 	rcu_assign_pointer(fence->channel, NULL);
 
-	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 		if (!--fctx->notify_ref)
 			drop = 1;
 	}
 
-	fence_put(&fence->base);
+	dma_fence_put(&fence->base);
 	return drop;
 }
 
 static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
 	struct nouveau_fence_priv *priv = (void*)drm->fence;
 
 	if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 
 struct nouveau_fence_work {
 	struct work_struct work;
-	struct fence_cb cb;
+	struct dma_fence_cb cb;
 	void (*func)(void *);
 	void *data;
 };
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
 	kfree(work);
 }
 
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
 
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
 		   void (*func)(void *), void *data)
 {
 	struct nouveau_fence_work *work;
 
-	if (fence_is_signaled(fence))
+	if (dma_fence_is_signaled(fence))
 		goto err;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
 	work->func = func;
 	work->data = data;
 
-	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+	if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
 		goto err_free;
 	return;
 
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	fence->timeout  = jiffies + (15 * HZ);
 
 	if (priv->uevent)
-		fence_init(&fence->base, &nouveau_fence_ops_uevent,
-			   &fctx->lock, fctx->context, ++fctx->sequence);
+		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+			       &fctx->lock, fctx->context, ++fctx->sequence);
 	else
-		fence_init(&fence->base, &nouveau_fence_ops_legacy,
-			   &fctx->lock, fctx->context, ++fctx->sequence);
+		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+			       &fctx->lock, fctx->context, ++fctx->sequence);
 	kref_get(&fctx->fence_ref);
 
-	trace_fence_emit(&fence->base);
+	trace_dma_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
 	if (!ret) {
-		fence_get(&fence->base);
+		dma_fence_get(&fence->base);
 		spin_lock_irq(&fctx->lock);
 
 		if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
 		struct nouveau_channel *chan;
 		unsigned long flags;
 
-		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 			return true;
 
 		spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
 			nvif_notify_put(&fctx->notify);
 		spin_unlock_irqrestore(&fctx->lock, flags);
 	}
-	return fence_is_signaled(&fence->base);
+	return dma_fence_is_signaled(&fence->base);
 }
 
 static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 	if (!lazy)
 		return nouveau_fence_wait_busy(fence, intr);
 
-	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
 	if (ret < 0)
 		return ret;
 	else if (!ret)
@@ -391,7 +391,7 @@ int
 nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
 	struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 
 		return ret;
 	}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		}
 
 		if (must_wait)
-			ret = fence_wait(fence, intr);
+			ret = dma_fence_wait(fence, intr);
 	}
 
 	return ret;
@@ -456,7 +456,7 @@ void
 nouveau_fence_unref(struct nouveau_fence **pfence)
 {
 	if (*pfence)
-		fence_put(&(*pfence)->base);
+		dma_fence_put(&(*pfence)->base);
 	*pfence = NULL;
 }
 
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	return ret;
 }
 
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
 {
 	return "nouveau";
 }
 
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
  * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 	return ret;
 }
 
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
 	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
 
 	/*
-	 * This needs uevents to work correctly, but fence_add_callback relies on
+	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
 	 * being able to enable signaling. It will still get signaled eventually,
 	 * just not right away.
 	 */
 	if (nouveau_fence_is_signaled(f)) {
 		list_del(&fence->head);
 
-		fence_put(&fence->base);
+		dma_fence_put(&fence->base);
 		return false;
 	}
 
 	return true;
 }
 
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
-	fence_free(&fence->base);
+	dma_fence_free(&fence->base);
}
 
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
 	.release = nouveau_fence_release
 };
 
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
 
 	ret = nouveau_fence_no_signaling(f);
 	if (ret)
-		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
 	else if (!--fctx->notify_ref)
 		nvif_notify_put(&fctx->notify);
 
 	return ret;
 }
 
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_enable_signaling,
 	.signaled = nouveau_fence_is_signaled,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = NULL
 };

+ 3 - 3
drivers/gpu/drm/nouveau/nouveau_fence.h

@@ -1,14 +1,14 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 #include <nvif/notify.h>
 
 struct nouveau_drm;
 struct nouveau_bo;
 
 struct nouveau_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	struct list_head head;
 
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
 	struct reservation_object *resv = nvbo->bo.resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence = NULL;
+	struct dma_fence *fence = NULL;
 
 	fobj = reservation_object_get_list(resv);
 

+ 1 - 1
drivers/gpu/drm/nouveau/nv04_fence.c

@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
 	priv->base.contexts = 15;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	return 0;
 }

+ 1 - 1
drivers/gpu/drm/nouveau/nv10_fence.c

@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 	return 0;
 }

+ 1 - 1
drivers/gpu/drm/nouveau/nv17_fence.c

@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 31;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,

+ 1 - 1
drivers/gpu/drm/nouveau/nv50_fence.c

@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
 	priv->base.contexts = 127;
-	priv->base.context_base = fence_context_alloc(priv->base.contexts);
+	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,

Some files were not shown because too many files changed in this diff