@@ -47,8 +47,12 @@
 void i915_gem_batch_pool_init(struct drm_device *dev,
 			      struct i915_gem_batch_pool *pool)
 {
+	int n;
+
 	pool->dev = dev;
-	INIT_LIST_HEAD(&pool->cache_list);
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		INIT_LIST_HEAD(&pool->cache_list[n]);
 }
 
 /**
@@ -59,16 +63,20 @@ void i915_gem_batch_pool_init(struct drm_device *dev,
  */
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
+	int n;
+
 	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
-	while (!list_empty(&pool->cache_list)) {
-		struct drm_i915_gem_object *obj =
-			list_first_entry(&pool->cache_list,
-					 struct drm_i915_gem_object,
-					 batch_pool_list);
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		while (!list_empty(&pool->cache_list[n])) {
+			struct drm_i915_gem_object *obj =
+				list_first_entry(&pool->cache_list[n],
+						 struct drm_i915_gem_object,
+						 batch_pool_link);
 
-		list_del(&obj->batch_pool_list);
-		drm_gem_object_unreference(&obj->base);
+			list_del(&obj->batch_pool_link);
+			drm_gem_object_unreference(&obj->base);
+		}
 	}
 }
 
@@ -91,28 +99,33 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 {
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_i915_gem_object *tmp, *next;
+	struct list_head *list;
+	int n;
 
 	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
-	list_for_each_entry_safe(tmp, next,
-				 &pool->cache_list, batch_pool_list) {
+	/* Compute a power-of-two bucket, but throw everything greater than
+	 * 16KiB into the same bucket: i.e. the buckets hold objects of
+	 * (1 page, 2 pages, 4 pages, 8+ pages).
+	 */
+	n = fls(size >> PAGE_SHIFT) - 1;
+	if (n >= ARRAY_SIZE(pool->cache_list))
+		n = ARRAY_SIZE(pool->cache_list) - 1;
+	list = &pool->cache_list[n];
+
+	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
 		if (tmp->active)
 			break;
 
 		/* While we're looping, do some clean up */
 		if (tmp->madv == __I915_MADV_PURGED) {
-			list_del(&tmp->batch_pool_list);
+			list_del(&tmp->batch_pool_link);
 			drm_gem_object_unreference(&tmp->base);
 			continue;
 		}
 
-		/*
-		 * Select a buffer that is at least as big as needed
-		 * but not 'too much' bigger. A better way to do this
-		 * might be to bucket the pool objects based on size.
-		 */
-		if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
+		if (tmp->base.size >= size) {
 			obj = tmp;
 			break;
 		}
@@ -132,7 +145,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		obj->madv = I915_MADV_DONTNEED;
 	}
 
-	list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+	list_move_tail(&obj->batch_pool_link, list);
 	i915_gem_object_pin_pages(obj);
 	return obj;
 }
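
For illustration, the bucket selection introduced above can be exercised in isolation. Below is a minimal userspace sketch of the same fls()-based computation, assuming 4KiB pages and the four-list cache_list[] array from this patch; fls_emul(), size_to_bucket() and N_BUCKETS are hypothetical stand-ins, since the kernel's fls() and ARRAY_SIZE() are not available outside the kernel.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12	/* assumption: 4KiB pages */
#define N_BUCKETS 4	/* stand-in for ARRAY_SIZE(pool->cache_list) */

/* Userspace stand-in for the kernel's fls(): position of the highest
 * set bit, 1-based, or 0 if no bits are set. */
static int fls_emul(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Same bucket computation as in i915_gem_batch_pool_get(): power-of-two
 * buckets by page count, with everything of 8+ pages sharing the last. */
static int size_to_bucket(size_t size)
{
	int n = fls_emul(size >> PAGE_SHIFT) - 1;

	if (n < 0)	/* sub-page sizes; the pool only sees page-sized requests */
		n = 0;
	if (n >= N_BUCKETS)
		n = N_BUCKETS - 1;
	return n;
}

int main(void)
{
	size_t sizes[] = { 4096, 8192, 12288, 16384, 32768, 1 << 20 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8zu bytes (%4zu pages) -> bucket %d\n",
		       sizes[i], sizes[i] >> PAGE_SHIFT,
		       size_to_bucket(sizes[i]));
	return 0;
}

Sizes of 1, 2, 3, 4 and 8 pages land in buckets 0, 1, 1, 2 and 3 respectively, and anything from 8 pages up shares the last bucket. Because each lookup now scans only objects of roughly the right size, the old "not 'too much' bigger" upper bound (tmp->base.size <= 2 * size) that this patch removes is no longer needed to avoid handing out wildly oversized buffers.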