@@ -32,20 +32,12 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+					      struct ttm_validate_buffer *entry)
 {
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
+	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
 
-		entry->reserved = false;
-		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
-			entry->removed = false;
-		}
 		__ttm_bo_unreserve(bo);
 	}
 }
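The whole point of the new helper is the iterator it is built on: list_for_each_entry_continue_reverse() starts at the element *before* the one passed in and walks back toward the list head, so the entry whose reservation just failed is itself skipped. A minimal userspace sketch of that semantic (the list plumbing below is a stripped-down reimplementation of the <linux/list.h> pieces involved, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Stripped-down copies of the <linux/list.h> pieces used above. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each_entry_continue_reverse(pos, head, member)	  \
	for (pos = list_entry((pos)->member.prev, typeof(*pos), member); \
	     &pos->member != (head);					  \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

struct item { const char *name; struct list_head head; };

int main(void)
{
	struct item a = { "A" }, b = { "B" }, c = { "C" };
	struct list_head list = { &a.head, &c.head };

	a.head = (struct list_head){ &b.head, &list };
	b.head = (struct list_head){ &c.head, &a.head };
	c.head = (struct list_head){ &list, &b.head };

	/* Pretend reserving "C" failed: only "B" and "A" are backed off,
	 * in reverse order, exactly as in the helper above.
	 */
	struct item *entry = &c;
	list_for_each_entry_continue_reverse(entry, &list, head)
		printf("backing off %s\n", entry->name);
	return 0;
}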
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (!entry->reserved)
-			continue;
+		unsigned put_count = ttm_bo_del_from_lru(bo);
 
-		if (!entry->removed) {
-			entry->put_count = ttm_bo_del_from_lru(bo);
-			entry->removed = true;
-		}
-	}
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		if (entry->put_count) {
-			ttm_bo_list_ref_sub(bo, entry->put_count, true);
-			entry->put_count = 0;
-		}
+		ttm_bo_list_ref_sub(bo, put_count, true);
 	}
 }
 
@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
+
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
+	}
+	spin_unlock(&glob->lru_lock);
+
 	if (ticket)
 		ww_acquire_fini(ticket);
-	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
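For context, the expected calling sequence around these helpers, sketched as a hypothetical driver; driver_submit(), driver_validate() and the sync_obj argument are made-up placeholders, and the ttm_eu_reserve_buffers() call assumes the three-parameter (ticket, list, intr) form implied by the hunks here:

#include <drm/ttm/ttm_execbuf_util.h>

static int driver_submit(struct list_head *validate_list, void *sync_obj)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/* Reserve every BO on the list; on failure nothing stays reserved. */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list, true);
	if (ret)
		return ret;

	ret = driver_validate(validate_list);	/* placeholder */
	if (ret) {
		/* Put all BOs back on the LRU and drop the reservations. */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/* Attach the fence and unreserve; this also finishes the ticket. */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, sync_obj);
	return 0;
}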
@@ -121,64 +102,55 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	if (list_empty(list))
 		return 0;
 
-	list_for_each_entry(entry, list, head) {
-		entry->reserved = false;
-		entry->put_count = 0;
-		entry->removed = false;
-	}
-
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		/* already slowpath reserved? */
-		if (entry->reserved)
-			continue;
-
 		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
 				       ticket);
+		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			__ttm_bo_unreserve(bo);
 
-		if (ret == -EDEADLK) {
-			/* uh oh, we lost out, drop every reservation and try
-			 * to only reserve this buffer, then start over if
-			 * this succeeds.
-			 */
-			BUG_ON(ticket == NULL);
-			spin_lock(&glob->lru_lock);
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-
-			if (intr) {
-				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-								       ticket);
-				if (unlikely(ret != 0)) {
-					if (ret == -EINTR)
-						ret = -ERESTARTSYS;
-					goto err_fini;
-				}
-			} else
-				ww_mutex_lock_slow(&bo->resv->lock, ticket);
-
-			entry->reserved = true;
-			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-				ret = -EBUSY;
-				goto err;
-			}
-			goto retry;
-		} else if (ret)
-			goto err;
-
-		entry->reserved = true;
-		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
 			ret = -EBUSY;
-			goto err;
 		}
+
+		if (!ret)
+			continue;
+
+		/* uh oh, we lost out, drop every reservation and try
+		 * to only reserve this buffer, then start over if
+		 * this succeeds.
+		 */
+		ttm_eu_backoff_reservation_reverse(list, entry);
+
+		if (ret == -EDEADLK && intr) {
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+		} else if (ret == -EDEADLK) {
+			ww_mutex_lock_slow(&bo->resv->lock, ticket);
+			ret = 0;
+		}
+
+		if (unlikely(ret != 0)) {
+			if (ret == -EINTR)
+				ret = -ERESTARTSYS;
+			if (ticket) {
+				ww_acquire_done(ticket);
+				ww_acquire_fini(ticket);
+			}
+			return ret;
+		}
+
+		/* move this item to the front of the list,
+		 * forces correct iteration of the loop without keeping track
+		 */
+		list_del(&entry->head);
+		list_add(&entry->head, list);
 	}
 
 	if (ticket)
@@ -186,20 +158,7 @@ retry:
 	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
 	return 0;
-
-err:
-	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
-err_fini:
-	if (ticket) {
-		ww_acquire_done(ticket);
-		ww_acquire_fini(ticket);
-	}
-	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
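The reworked loop above is an instance of the standard wait/wound locking pattern: on -EDEADLK drop every lock held so far, take the contended lock in the slow path (where sleeping can no longer deadlock), then move it to the list head so the invariant "everything from the head up to the current entry is locked" survives the continued iteration. Reduced to bare ww_mutexes as a sketch (struct obj and lock_list() are made-up names, not TTM code):

#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct obj {
	struct ww_mutex lock;
	struct list_head head;
};

static int lock_list(struct ww_acquire_ctx *ctx, struct list_head *list)
{
	struct obj *entry, *prev;
	int ret;

	ww_acquire_init(ctx, &demo_ww_class);

	list_for_each_entry(entry, list, head) {
		ret = ww_mutex_lock_interruptible(&entry->lock, ctx);
		if (!ret)
			continue;

		/* Back off: drop the locks taken so far, i.e. exactly the
		 * entries before us in list order.
		 */
		prev = entry;
		list_for_each_entry_continue_reverse(prev, list, head)
			ww_mutex_unlock(&prev->lock);

		if (ret == -EDEADLK)
			ret = ww_mutex_lock_slow_interruptible(&entry->lock,
							       ctx);
		if (ret) {
			/* -EINTR: signalled while sleeping on the lock */
			ww_acquire_fini(ctx);
			return ret;
		}

		/* Holding only the contended lock, move it to the front so
		 * the continued walk re-takes the others after it.
		 */
		list_move(&entry->head, list);
	}

	ww_acquire_done(ctx);
	return 0;
}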
@@ -228,7 +187,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
-		entry->reserved = false;
 	}
 	spin_unlock(&glob->lru_lock);
 	if (ticket)