@@ -23,17 +23,13 @@
 static DEFINE_MUTEX(clk_lock);
 static LIST_HEAD(clk_list);
 
-static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
+static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
 {
 	struct v4l2_clk *clk;
 
-	list_for_each_entry(clk, &clk_list, list) {
-		if (strcmp(dev_id, clk->dev_id))
-			continue;
-
-		if (!id || !clk->id || !strcmp(clk->id, id))
+	list_for_each_entry(clk, &clk_list, list)
+		if (!strcmp(dev_id, clk->dev_id))
 			return clk;
-	}
 
 	return ERR_PTR(-ENODEV);
 }
@@ -43,7 +39,7 @@ struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
 	struct v4l2_clk *clk;
 
 	mutex_lock(&clk_lock);
-	clk = v4l2_clk_find(dev_name(dev), id);
+	clk = v4l2_clk_find(dev_name(dev));
 
 	if (!IS_ERR(clk))
 		atomic_inc(&clk->use_count);
@@ -127,8 +123,8 @@ void v4l2_clk_disable(struct v4l2_clk *clk)
 	mutex_lock(&clk->lock);
 
 	enable = --clk->enable;
-	if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
-		 clk->dev_id, clk->id))
+	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
+		 clk->dev_id))
 		clk->enable++;
 	else if (!enable && clk->ops->disable)
 		clk->ops->disable(clk);
@@ -181,7 +177,7 @@ EXPORT_SYMBOL(v4l2_clk_set_rate);
 
 struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
 				   const char *dev_id,
-				   const char *id, void *priv)
+				   void *priv)
 {
 	struct v4l2_clk *clk;
 	int ret;
@@ -193,9 +189,8 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
 	if (!clk)
 		return ERR_PTR(-ENOMEM);
 
-	clk->id = kstrdup(id, GFP_KERNEL);
 	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
-	if ((id && !clk->id) || !clk->dev_id) {
+	if (!clk->dev_id) {
 		ret = -ENOMEM;
 		goto ealloc;
 	}
@@ -205,7 +200,7 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
 	mutex_init(&clk->lock);
 
 	mutex_lock(&clk_lock);
-	if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
+	if (!IS_ERR(v4l2_clk_find(dev_id))) {
 		mutex_unlock(&clk_lock);
 		ret = -EEXIST;
 		goto eexist;
@@ -217,7 +212,6 @@ struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
 
 eexist:
 ealloc:
-	kfree(clk->id);
 	kfree(clk->dev_id);
 	kfree(clk);
 	return ERR_PTR(ret);
@@ -227,15 +221,14 @@ EXPORT_SYMBOL(v4l2_clk_register);
 void v4l2_clk_unregister(struct v4l2_clk *clk)
 {
 	if (WARN(atomic_read(&clk->use_count),
-		 "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
-		 __func__, clk->dev_id, clk->id))
+		 "%s(): Refusing to unregister ref-counted %s clock!\n",
+		 __func__, clk->dev_id))
 		return;
 
 	mutex_lock(&clk_lock);
 	list_del(&clk->list);
 	mutex_unlock(&clk_lock);
 
-	kfree(clk->id);
 	kfree(clk->dev_id);
 	kfree(clk);
 }
@@ -253,7 +246,7 @@ static unsigned long fixed_get_rate(struct v4l2_clk *clk)
 }
 
 struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
-		const char *id, unsigned long rate, struct module *owner)
+		unsigned long rate, struct module *owner)
 {
 	struct v4l2_clk *clk;
 	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -265,7 +258,7 @@ struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
 	priv->ops.get_rate = fixed_get_rate;
 	priv->ops.owner = owner;
 
-	clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
+	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
 	if (IS_ERR(clk))
 		kfree(priv);
 
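
Caller-side sketch (not part of the patch above): with the clock name removed, a V4L2 clock is identified by the consumer's device name alone, so a provider now passes only dev_id plus its private pointer. The fragment below is a hypothetical illustration of the updated registration path; the device name "1-0010" (I2C bus 1, address 0x10), the 24 MHz rate and the example_* function names are assumptions, not taken from the patch.

	/* Illustrative only: register/unregister a fixed master clock for one sensor. */
	#include <linux/err.h>
	#include <linux/module.h>
	#include <media/v4l2-clk.h>

	static struct v4l2_clk *example_mclk;

	static int example_register_sensor_mclk(void)
	{
		/* Matched later purely by the consumer's dev_name(), here assumed "1-0010". */
		example_mclk = __v4l2_clk_register_fixed("1-0010", 24000000, THIS_MODULE);
		if (IS_ERR(example_mclk))
			return PTR_ERR(example_mclk);
		return 0;
	}

	static void example_unregister_sensor_mclk(void)
	{
		/* Drops the list entry and frees the duplicated dev_id string. */
		v4l2_clk_unregister(example_mclk);
	}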