Browse Source

drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list

Fixes a use-after-free due to a race condition in
ttm_bo_cleanup_refs_and_unlock, which allows one task to reserve a BO
and destroy its ttm_resv while another task is waiting for it to signal
in reservation_object_wait_timeout_rcu.

v2:
* Always initialize bo->ttm_resv in ttm_bo_init_reserved
 (Christian König)

Fixes: 0d2bd2ae045d ("drm/ttm: fix memory leak while individualizing BOs")
Reviewed-by: Chunming Zhou <david1.zhou@amd.com> # v1
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Michel Dänzer 7 years ago
parent
commit
e1fc12c5d9
1 changed file with 4 additions and 12 deletions
  1. +4 −12
      drivers/gpu/drm/ttm/ttm_bo.c

+ 4 - 12
drivers/gpu/drm/ttm/ttm_bo.c

@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
 	dma_fence_put(bo->moving);
-	if (bo->resv == &bo->ttm_resv)
-		reservation_object_fini(&bo->ttm_resv);
+	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->resv == &bo->ttm_resv)
 		return 0;
 
-	reservation_object_init(&bo->ttm_resv);
 	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 
 	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
-	if (r) {
+	if (r)
 		reservation_object_unlock(&bo->ttm_resv);
-		reservation_object_fini(&bo->ttm_resv);
-	}
 
 	return r;
 }
@@ -457,10 +453,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
-			if (bo->resv != &bo->ttm_resv) {
+			if (bo->resv != &bo->ttm_resv)
 				reservation_object_unlock(&bo->ttm_resv);
-				reservation_object_fini(&bo->ttm_resv);
-			}
 
 			ttm_bo_cleanup_memtype_use(bo);
 			return;
@@ -560,8 +554,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	}
 
 	ttm_bo_del_from_lru(bo);
-	if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
-		reservation_object_fini(&bo->ttm_resv);
 	list_del_init(&bo->ddestroy);
 	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
@@ -1210,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		lockdep_assert_held(&bo->resv->lock.base);
 	} else {
 		bo->resv = &bo->ttm_resv;
-		reservation_object_init(&bo->ttm_resv);
 	}
+	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;