@@ -54,6 +54,20 @@ struct cache_req {
 	struct list_head list;
 };
 
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list object
+ * @count: number of messages
+ * @rpm_msgs: the messages in the batch (flexible array, allocated
+ *	      together with this struct)
+ */
+struct batch_cache_req {
+	struct list_head list;
+	int count;
+	struct rpmh_request rpm_msgs[];
+};
+
 static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
 {
 	struct rsc_drv *drv = dev_get_drvdata(dev->parent);
@@ -73,10 +87,13 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
 		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
 			rpm_msg->msg.cmds[0].addr, r);
 
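+	/* No completion is set for fire-and-forget requests */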
+	if (!compl)
+		goto exit;
+
 	/* Signal the blocking thread we are done */
-	if (compl)
-		complete(compl);
+	complete(compl);
 
+exit:
 	if (rpm_msg->needs_free)
 		kfree(rpm_msg);
 }
@@ -264,6 +281,138 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
 }
 EXPORT_SYMBOL(rpmh_write);
 
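+/*
+ * Queue a sleep/wake batch on the controller's batch_cache list. Queued
+ * requests are not sent to the hardware here; they are written out later,
+ * when rpmh_flush() calls flush_batch().
+ */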
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_add_tail(&req->list, &ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req;
+	const struct rpmh_request *rpm_msg;
+	unsigned long flags;
+	int ret = 0;
+	int i;
+
+	/* Send Sleep/Wake requests to the controller, expect no response */
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry(req, &ctrlr->batch_cache, list) {
+		for (i = 0; i < req->count; i++) {
+			rpm_msg = req->rpm_msgs + i;
+			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+						       &rpm_msg->msg);
+			if (ret)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+	return ret;
+}
+
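+/*
+ * Free every batch cached on the controller's batch_cache list and
+ * reinitialise the list head; called from rpmh_invalidate().
+ */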
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+		kfree(req);
+	INIT_LIST_HEAD(&ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+/**
+ * rpmh_write_batch() - Write multiple sets of RPMH commands and wait for
+ * the batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: Array of counts of elements in each batch, 0 terminated.
+ *
+ * If the request state is ACTIVE, the requests are sent to the controller
+ * immediately as completion requests, and the function waits until all the
+ * commands are complete. If the request is for SLEEP or WAKE_ONLY, the
+ * batch is cached to be written to the controller at a later time, and no
+ * ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+		     const struct tcs_cmd *cmd, u32 *n)
+{
+	struct batch_cache_req *req;
+	struct rpmh_request *rpm_msgs;
+	DECLARE_COMPLETION_ONSTACK(compl);
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	unsigned long time_left;
+	int count = 0;
+	int ret, i, j;
+
+	if (!cmd || !n)
+		return -EINVAL;
+
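+	/* n[] is a 0-terminated array of per-batch command counts */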
+	while (n[count] > 0)
+		count++;
+	if (!count)
+		return -EINVAL;
+
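+	/* One allocation holds the list node plus all count messages */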
+	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+		      GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+	req->count = count;
+	rpm_msgs = req->rpm_msgs;
+
+	for (i = 0; i < count; i++) {
+		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+		cmd += n[i];
+	}
+
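+	/*
+	 * Sleep/wake batches are only cached here; ownership of req passes
+	 * to the controller and the memory is freed by invalidate_batch().
+	 */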
+	if (state != RPMH_ACTIVE_ONLY_STATE) {
+		cache_batch(ctrlr, req);
+		return 0;
+	}
+
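+	/*
+	 * Active-only: send each message now. On a send error, complete
+	 * the current and remaining messages ourselves via rpmh_tx_done()
+	 * so the wait loop below is not left hanging.
+	 */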
+	for (i = 0; i < count; i++) {
+		rpm_msgs[i].completion = &compl;
+		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+		if (ret) {
+			pr_err("Error(%d) sending RPMH message addr=%#x\n",
+			       ret, rpm_msgs[i].msg.cmds[0].addr);
+			for (j = i; j < count; j++)
+				rpmh_tx_done(&rpm_msgs[j].msg, ret);
+			break;
+		}
+	}
+
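+	/*
+	 * All count messages signal the same on-stack completion, so wait
+	 * for it count times; time_left carries the remaining timeout
+	 * budget across iterations.
+	 */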
+	time_left = RPMH_TIMEOUT_MS;
+	for (i = 0; i < count; i++) {
+		time_left = wait_for_completion_timeout(&compl, time_left);
+		if (!time_left) {
+			/*
+			 * Better hope they never finish because they'll signal
+			 * the completion on our stack and that's bad once
+			 * we've returned from the function.
+			 */
+			WARN_ON(1);
+			ret = -ETIMEDOUT;
+			goto exit;
+		}
+	}
+
+exit:
+	kfree(req);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
+
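+/*
+ * Example usage of rpmh_write_batch() (illustrative only, not part of
+ * this patch; addresses and data values are hypothetical). A client
+ * sending two batches of 2 and 1 commands, with n[] 0-terminated:
+ *
+ *	struct tcs_cmd cmd[3] = {
+ *		{ .addr = 0x50000, .data = 0x1 },
+ *		{ .addr = 0x50004, .data = 0x1 },
+ *		{ .addr = 0x50008, .data = 0x0 },
+ *	};
+ *	u32 n[] = { 2, 1, 0 };
+ *	int ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmd, n);
+ */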
 static int is_req_valid(struct cache_req *req)
 {
 	return (req->sleep_val != UINT_MAX &&
@@ -309,6 +458,11 @@ int rpmh_flush(const struct device *dev)
 		return 0;
 	}
 
+	/* First flush the cached batch requests */
+	ret = flush_batch(ctrlr);
+	if (ret)
+		return ret;
+
 	/*
 	 * Nobody else should be calling this function other than system PM,
 	 * hence we can run without locks.
@@ -347,6 +501,7 @@ int rpmh_invalidate(const struct device *dev)
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	int ret;
 
+	invalidate_batch(ctrlr);
 	ctrlr->dirty = true;
 
 	do {