@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
 	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
 						    msg);
 	struct completion *compl = rpm_msg->completion;
+	bool free = rpm_msg->needs_free;
 
 	rpm_msg->err = r;
 
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
 	complete(compl);
 
 exit:
-	if (rpm_msg->needs_free)
+	if (free)
 		kfree(rpm_msg);
 }
 
@@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 {
 	struct batch_cache_req *req;
 	struct rpmh_request *rpm_msgs;
-	DECLARE_COMPLETION_ONSTACK(compl);
+	struct completion *compls;
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	unsigned long time_left;
 	int count = 0;
-	int ret, i, j;
+	int ret, i;
+	void *ptr;
 
 	if (!cmd || !n)
 		return -EINVAL;
@@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 	if (!count)
 		return -EINVAL;
 
-	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+	ptr = kzalloc(sizeof(*req) +
+		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
 		      GFP_ATOMIC);
-	if (!req)
+	if (!ptr)
 		return -ENOMEM;
+
+	req = ptr;
+	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
 	req->count = count;
 	rpm_msgs = req->rpm_msgs;
 
@@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 	}
 
 	for (i = 0; i < count; i++) {
-		rpm_msgs[i].completion = &compl;
+		struct completion *compl = &compls[i];
+
+		init_completion(compl);
+		rpm_msgs[i].completion = compl;
 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
 		if (ret) {
 			pr_err("Error(%d) sending RPMH message addr=%#x\n",
 			       ret, rpm_msgs[i].msg.cmds[0].addr);
-			for (j = i; j < count; j++)
-				rpmh_tx_done(&rpm_msgs[j].msg, ret);
 			break;
 		}
 	}
 
 	time_left = RPMH_TIMEOUT_MS;
-	for (i = 0; i < count; i++) {
-		time_left = wait_for_completion_timeout(&compl, time_left);
+	while (i--) {
+		time_left = wait_for_completion_timeout(&compls[i], time_left);
 		if (!time_left) {
 			/*
 			 * Better hope they never finish because they'll signal
-			 * the completion on our stack and that's bad once
-			 * we've returned from the function.
+			 * the completion that we're going to free once
+			 * we've returned from this function.
 			 */
 			WARN_ON(1);
 			ret = -ETIMEDOUT;
@@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 	}
 
 exit:
-	kfree(req);
+	kfree(ptr);
 
 	return ret;
 }