@@ -101,19 +101,6 @@ struct omap_gem_object {
	 * Virtual address, if mapped.
	 */
	void *vaddr;
-
-	/**
-	 * sync-object allocated on demand (if needed)
-	 *
-	 * Per-buffer sync-object for tracking pending and completed hw/dma
-	 * read and write operations.
-	 */
-	struct {
-		uint32_t write_pending;
-		uint32_t write_complete;
-		uint32_t read_pending;
-		uint32_t read_complete;
-	} *sync;
 };
 
 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
@@ -1070,205 +1057,6 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
 }
 #endif
 
-/* -----------------------------------------------------------------------------
- * Buffer Synchronization
- */
-
-static DEFINE_SPINLOCK(sync_lock);
-
-struct omap_gem_sync_waiter {
-	struct list_head list;
-	struct omap_gem_object *omap_obj;
-	enum omap_gem_op op;
-	uint32_t read_target, write_target;
-	/* notify called w/ sync_lock held */
-	void (*notify)(void *arg);
-	void *arg;
-};
-
-/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
- * the read and/or write target count is achieved which can call a user
- * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
- * cpu access), etc.
- */
-static LIST_HEAD(waiters);
-
-static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
-{
-	struct omap_gem_object *omap_obj = waiter->omap_obj;
-	if ((waiter->op & OMAP_GEM_READ) &&
-			(omap_obj->sync->write_complete < waiter->write_target))
-		return true;
-	if ((waiter->op & OMAP_GEM_WRITE) &&
-			(omap_obj->sync->read_complete < waiter->read_target))
-		return true;
-	return false;
-}
-
-/* macro for sync debug.. */
-#define SYNCDBG 0
-#define SYNC(fmt, ...) do { if (SYNCDBG) \
-		pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
-	} while (0)
-
-
-static void sync_op_update(void)
-{
-	struct omap_gem_sync_waiter *waiter, *n;
-	list_for_each_entry_safe(waiter, n, &waiters, list) {
-		if (!is_waiting(waiter)) {
-			list_del(&waiter->list);
-			SYNC("notify: %p", waiter);
-			waiter->notify(waiter->arg);
-			kfree(waiter);
-		}
-	}
-}
-
-static inline int sync_op(struct drm_gem_object *obj,
-		enum omap_gem_op op, bool start)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-
-	spin_lock(&sync_lock);
-
-	if (!omap_obj->sync) {
-		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
-		if (!omap_obj->sync) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-	}
-
-	if (start) {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_pending++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_pending++;
-	} else {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_complete++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_complete++;
-		sync_op_update();
-	}
-
-unlock:
-	spin_unlock(&sync_lock);
-
-	return ret;
-}
-
-/* mark the start of read and/or write operation */
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, true);
-}
-
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, false);
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(sync_event);
-
-static void sync_notify(void *arg)
-{
-	struct task_struct **waiter_task = arg;
-	*waiter_task = NULL;
-	wake_up_all(&sync_event);
-}
-
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-	if (omap_obj->sync) {
-		struct task_struct *waiter_task = current;
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_KERNEL);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = sync_notify;
-		waiter->arg = &waiter_task;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			ret = wait_event_interruptible(sync_event,
-					(waiter_task == NULL));
-			spin_lock(&sync_lock);
-			if (waiter_task) {
-				SYNC("interrupted: %p", waiter);
-				/* we were interrupted */
-				list_del(&waiter->list);
-				waiter_task = NULL;
-			} else {
-				/* freed in sync_op_update() */
-				waiter = NULL;
-			}
-		}
-		spin_unlock(&sync_lock);
-		kfree(waiter);
-	}
-	return ret;
-}
-
-/* call fxn(arg), either synchronously or asynchronously if the op
- * is currently blocked.. fxn() can be called from any context
- *
- * (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_finish().. but this could be better defined later
- * if needed)
- *
- * TODO more code in common w/ _sync()..
- */
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
-		void (*fxn)(void *arg), void *arg)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	if (omap_obj->sync) {
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_ATOMIC);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = fxn;
-		waiter->arg = arg;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			return 0;
-		}
-
-		spin_unlock(&sync_lock);
-
-		kfree(waiter);
-	}
-
-	/* no waiting.. */
-	fxn(arg);
-
-	return 0;
-}
-
 /* -----------------------------------------------------------------------------
  * Constructor & Destructor
  */
@@ -1308,8 +1096,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 		drm_prime_gem_destroy(obj, omap_obj->sgt);
 	}
 
-	kfree(omap_obj->sync);
-
 	drm_gem_object_release(obj);
 
 	kfree(omap_obj);