@@ -28,14 +28,20 @@ nouveau_event_put(struct nouveau_eventh *handler)
 {
 	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-	if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
-		spin_lock_irqsave(&event->refs_lock, flags);
-		if (!--event->index[handler->index].refs) {
+	u32 m, t;
+
+	if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
+		return;
+
+	spin_lock_irqsave(&event->refs_lock, flags);
+	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+		if (!--event->refs[handler->index * event->types_nr + t]) {
 			if (event->disable)
-				event->disable(event, handler->index);
+				event->disable(event, 1 << t, handler->index);
 		}
-		spin_unlock_irqrestore(&event->refs_lock, flags);
+
 	}
+	spin_unlock_irqrestore(&event->refs_lock, flags);
 }
 
 void
@@ -43,14 +49,20 @@ nouveau_event_get(struct nouveau_eventh *handler)
 {
 	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-	if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
-		spin_lock_irqsave(&event->refs_lock, flags);
-		if (!event->index[handler->index].refs++) {
+	u32 m, t;
+
+	if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
+		return;
+
+	spin_lock_irqsave(&event->refs_lock, flags);
+	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
+		if (!event->refs[handler->index * event->types_nr + t]++) {
 			if (event->enable)
-				event->enable(event, handler->index);
+				event->enable(event, 1 << t, handler->index);
 		}
-		spin_unlock_irqrestore(&event->refs_lock, flags);
+
 	}
+	spin_unlock_irqrestore(&event->refs_lock, flags);
 }
 
 static void
@@ -65,38 +77,47 @@ nouveau_event_fini(struct nouveau_eventh *handler)
 }
 
 static int
-nouveau_event_init(struct nouveau_event *event, int index,
-		   int (*func)(void *, int), void *priv,
+nouveau_event_init(struct nouveau_event *event, u32 types, int index,
+		   int (*func)(void *, u32, int), void *priv,
 		   struct nouveau_eventh *handler)
 {
 	unsigned long flags;
 
+	if (types & ~((1 << event->types_nr) - 1))
+		return -EINVAL;
 	if (index >= event->index_nr)
 		return -EINVAL;
 
 	handler->event = event;
 	handler->flags = 0;
+	handler->types = types;
 	handler->index = index;
 	handler->func = func;
 	handler->priv = priv;
 
 	spin_lock_irqsave(&event->list_lock, flags);
-	list_add_tail(&handler->head, &event->index[index].list);
+	list_add_tail(&handler->head, &event->list[index]);
 	spin_unlock_irqrestore(&event->list_lock, flags);
 	return 0;
 }
 
 int
-nouveau_event_new(struct nouveau_event *event, int index,
-		  int (*func)(void *, int), void *priv,
+nouveau_event_new(struct nouveau_event *event, u32 types, int index,
+		  int (*func)(void *, u32, int), void *priv,
 		  struct nouveau_eventh **phandler)
 {
 	struct nouveau_eventh *handler;
 	int ret = -ENOMEM;
 
+	if (event->check) {
+		ret = event->check(event, types, index);
+		if (ret)
+			return ret;
+	}
+
 	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
 	if (handler) {
-		ret = nouveau_event_init(event, index, func, priv, handler);
+		ret = nouveau_event_init(event, types, index, func, priv, handler);
 		if (ret)
 			kfree(handler);
 	}
@@ -116,7 +137,7 @@ nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
 }
 
 void
-nouveau_event_trigger(struct nouveau_event *event, int index)
+nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
 {
 	struct nouveau_eventh *handler;
 	unsigned long flags;
@@ -125,10 +146,15 @@ nouveau_event_trigger(struct nouveau_event *event, int index)
 		return;
 
 	spin_lock_irqsave(&event->list_lock, flags);
-	list_for_each_entry(handler, &event->index[index].list, head) {
-		if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
-		    handler->func(handler->priv, index) == NVKM_EVENT_DROP)
-			nouveau_event_put(handler);
+	list_for_each_entry(handler, &event->list[index], head) {
+		if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
+			continue;
+		if (!(handler->types & types))
+			continue;
+		if (handler->func(handler->priv, handler->types & types, index)
+		    != NVKM_EVENT_DROP)
+			continue;
+		nouveau_event_put(handler);
 	}
 	spin_unlock_irqrestore(&event->list_lock, flags);
 }
@@ -144,20 +170,27 @@ nouveau_event_destroy(struct nouveau_event **pevent)
 }
 
 int
-nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
 {
 	struct nouveau_event *event;
 	int i;
 
-	event = *pevent = kzalloc(sizeof(*event) + index_nr *
-				  sizeof(event->index[0]), GFP_KERNEL);
+	event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
+				  sizeof(event->refs[0]), GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
+	event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
+	if (!event->list) {
+		kfree(event);
+		return -ENOMEM;
+	}
+
 	spin_lock_init(&event->list_lock);
 	spin_lock_init(&event->refs_lock);
 	for (i = 0; i < index_nr; i++)
-		INIT_LIST_HEAD(&event->index[i].list);
+		INIT_LIST_HEAD(&event->list[i]);
+	event->types_nr = types_nr;
 	event->index_nr = index_nr;
 	return 0;
 }