@@ -29,20 +29,21 @@
  * mei_me_cl_by_uuid - locate index of me client
  *
  * @dev: mei device
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
  * returns me client index or -ENOENT if not found
  */
 int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
 {
-        int i, res = -ENOENT;
+        int i;

         for (i = 0; i < dev->me_clients_num; ++i)
                 if (uuid_le_cmp(*uuid,
-                                dev->me_clients[i].props.protocol_name) == 0) {
-                        res = i;
-                        break;
-                }
+                                dev->me_clients[i].props.protocol_name) == 0)
+                        return i;

-        return res;
+        return -ENOENT;
 }


@@ -60,36 +61,78 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
 int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
 {
         int i;
+
         for (i = 0; i < dev->me_clients_num; i++)
                 if (dev->me_clients[i].client_id == client_id)
-                        break;
-        if (WARN_ON(dev->me_clients[i].client_id != client_id))
-                return -ENOENT;
+                        return i;

-        if (i == dev->me_clients_num)
-                return -ENOENT;
-
-        return i;
+        return -ENOENT;
 }


 /**
- * mei_io_list_flush - removes list entry belonging to cl.
+ * mei_cl_cmp_id - tells if the clients are the same
  *
- * @list: An instance of our list structure
- * @cl: host client
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true - if the clients have the same host and me ids
+ *         false - otherwise
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+                                 const struct mei_cl *cl2)
+{
+        return cl1 && cl2 &&
+                (cl1->host_client_id == cl2->host_client_id) &&
+                (cl1->me_client_id == cl2->me_client_id);
+}
+
+/**
+ * mei_io_list_flush - removes cbs belonging to cl.
+ *
+ * @list: an instance of our list structure
+ * @cl: host client, can be NULL for flushing the whole list
+ * @free: whether to free the cbs
  */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+static void __mei_io_list_flush(struct mei_cl_cb *list,
+                                struct mei_cl *cl, bool free)
 {
         struct mei_cl_cb *cb;
         struct mei_cl_cb *next;

+        /* enable removing everything if no cl is specified */
         list_for_each_entry_safe(cb, next, &list->list, list) {
-                if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+                if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
                         list_del(&cb->list);
+                        if (free)
+                                mei_io_cb_free(cb);
+                }
         }
 }

+/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+        __mei_io_list_flush(list, cl, false);
+}
+
+
+/**
+ * mei_io_list_free - removes cbs belonging to cl and frees them
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+        __mei_io_list_flush(list, cl, true);
+}
+
 /**
  * mei_io_cb_free - free mei_cb_private related memory
  *
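
[Illustrative note, not part of the diff] A minimal sketch of how the two new wrappers are meant to be used; the helper name example_cl_teardown is hypothetical and the queue choice is only for illustration. mei_io_list_flush() merely unlinks the matching cbs, while mei_io_list_free() also releases them through mei_io_cb_free(); passing a NULL client selects every cb on the list.

        static void example_cl_teardown(struct mei_device *dev, struct mei_cl *cl)
        {
                /* control queue: unlink the client's cbs without freeing them */
                mei_io_list_flush(&dev->ctrl_wr_list, cl);

                /* write queue: unlink the client's cbs and free them as well */
                mei_io_list_free(&dev->write_list, cl);

                /* cl == NULL empties the whole list */
                mei_io_list_free(&dev->write_waiting_list, NULL);
        }
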
@@ -196,8 +239,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)

         cl_dbg(dev, cl, "remove list entry belonging to cl\n");
         mei_io_list_flush(&cl->dev->read_list, cl);
-        mei_io_list_flush(&cl->dev->write_list, cl);
-        mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+        mei_io_list_free(&cl->dev->write_list, cl);
+        mei_io_list_free(&cl->dev->write_waiting_list, cl);
         mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
         mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
         mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
@@ -254,10 +297,9 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
 {
         struct mei_device *dev = cl->dev;
-        struct mei_cl_cb *cb = NULL;
-        struct mei_cl_cb *next = NULL;
+        struct mei_cl_cb *cb;

-        list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+        list_for_each_entry(cb, &dev->read_list.list, list)
                 if (mei_cl_cmp_id(cl, cb->cl))
                         return cb;
         return NULL;
@@ -375,6 +417,23 @@ void mei_host_client_init(struct work_struct *work)
         mutex_unlock(&dev->device_lock);
 }

+/**
+ * mei_hbuf_acquire - try to acquire host buffer
+ *
+ * @dev: the device structure
+ * returns true if host buffer was acquired
+ */
+bool mei_hbuf_acquire(struct mei_device *dev)
+{
+        if (!dev->hbuf_is_ready) {
+                dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
+                return false;
+        }
+
+        dev->hbuf_is_ready = false;
+
+        return true;
+}

 /**
  * mei_cl_disconnect - disconnect host client from the me one
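
[Illustrative note, not part of the diff] A sketch of the calling pattern this helper replaces throughout the file; the fragment assumes dev, cl and rets from the surrounding functions, and mei_hbm_cl_flow_control_req() is taken from a later hunk.

        /* before: open-coded test and clear of the host buffer flag */
        if (dev->hbuf_is_ready) {
                dev->hbuf_is_ready = false;
                if (mei_hbm_cl_flow_control_req(dev, cl))
                        rets = -ENODEV;
        }

        /* after: the test and the clear live in one helper */
        if (mei_hbuf_acquire(dev)) {
                if (mei_hbm_cl_flow_control_req(dev, cl))
                        rets = -ENODEV;
        }
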
@@ -406,8 +465,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
                 return -ENOMEM;

         cb->fop_type = MEI_FOP_CLOSE;
-        if (dev->hbuf_is_ready) {
-                dev->hbuf_is_ready = false;
+        if (mei_hbuf_acquire(dev)) {
                 if (mei_hbm_cl_disconnect_req(dev, cl)) {
                         rets = -ENODEV;
                         cl_err(dev, cl, "failed to disconnect.\n");
@@ -461,17 +519,17 @@ free:
 bool mei_cl_is_other_connecting(struct mei_cl *cl)
 {
         struct mei_device *dev;
-        struct mei_cl *pos;
-        struct mei_cl *next;
+        struct mei_cl *ocl; /* the other client */

         if (WARN_ON(!cl || !cl->dev))
                 return false;

         dev = cl->dev;

-        list_for_each_entry_safe(pos, next, &dev->file_list, link) {
-                if ((pos->state == MEI_FILE_CONNECTING) &&
-                        (pos != cl) && cl->me_client_id == pos->me_client_id)
+        list_for_each_entry(ocl, &dev->file_list, link) {
+                if (ocl->state == MEI_FILE_CONNECTING &&
+                    ocl != cl &&
+                    cl->me_client_id == ocl->me_client_id)
                         return true;

         }
@@ -505,11 +563,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
                 goto out;
         }

-        cb->fop_type = MEI_FOP_IOCTL;
-
-        if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
-                dev->hbuf_is_ready = false;
+        cb->fop_type = MEI_FOP_CONNECT;

+        /* run hbuf acquire last so we don't have to undo */
+        if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
                 if (mei_hbm_cl_connect_req(dev, cl)) {
                         rets = -ENODEV;
                         goto out;
@@ -521,18 +578,19 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
         }

         mutex_unlock(&dev->device_lock);
-        rets = wait_event_timeout(dev->wait_recvd_msg,
-                        (cl->state == MEI_FILE_CONNECTED ||
-                        cl->state == MEI_FILE_DISCONNECTED),
-                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+        wait_event_timeout(dev->wait_recvd_msg,
+                        (cl->state == MEI_FILE_CONNECTED ||
+                         cl->state == MEI_FILE_DISCONNECTED),
+                        mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
         mutex_lock(&dev->device_lock);

         if (cl->state != MEI_FILE_CONNECTED) {
-                rets = -EFAULT;
+                /* something went really wrong */
+                if (!cl->status)
+                        cl->status = -EFAULT;

                 mei_io_list_flush(&dev->ctrl_rd_list, cl);
                 mei_io_list_flush(&dev->ctrl_wr_list, cl);
-                goto out;
         }

         rets = cl->status;
@@ -554,7 +612,8 @@ out:
 int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
 {
         struct mei_device *dev;
-        int i;
+        struct mei_me_client *me_cl;
+        int id;

         if (WARN_ON(!cl || !cl->dev))
                 return -EINVAL;
@@ -567,19 +626,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
         if (cl->mei_flow_ctrl_creds > 0)
                 return 1;

-        for (i = 0; i < dev->me_clients_num; i++) {
-                struct mei_me_client *me_cl = &dev->me_clients[i];
-                if (me_cl->client_id == cl->me_client_id) {
-                        if (me_cl->mei_flow_ctrl_creds) {
-                                if (WARN_ON(me_cl->props.single_recv_buf == 0))
-                                        return -EINVAL;
-                                return 1;
-                        } else {
-                                return 0;
-                        }
-                }
+        id = mei_me_cl_by_id(dev, cl->me_client_id);
+        if (id < 0) {
+                cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+                return id;
         }
-        return -ENOENT;
+
+        me_cl = &dev->me_clients[id];
+        if (me_cl->mei_flow_ctrl_creds) {
+                if (WARN_ON(me_cl->props.single_recv_buf == 0))
+                        return -EINVAL;
+                return 1;
+        }
+        return 0;
 }

 /**
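
[Illustrative note, not part of the diff] An example caller, mirroring the mei_cl_write() hunk further down, showing how the three outcomes of mei_cl_flow_ctrl_creds() are handled; rets, buf and the out/err labels are assumed from that hunk.

        rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)           /* me client lookup or sanity check failed */
                goto err;
        if (rets == 0) {        /* no credits: keep the cb queued for later */
                rets = buf->size;
                goto out;
        }
        /* rets == 1: a flow control credit is available, send now */
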
@@ -595,32 +654,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
 int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
 {
         struct mei_device *dev;
-        int i;
+        struct mei_me_client *me_cl;
+        int id;

         if (WARN_ON(!cl || !cl->dev))
                 return -EINVAL;

         dev = cl->dev;

-        if (!dev->me_clients_num)
-                return -ENOENT;
+        id = mei_me_cl_by_id(dev, cl->me_client_id);
+        if (id < 0) {
+                cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
+                return id;
+        }

-        for (i = 0; i < dev->me_clients_num; i++) {
-                struct mei_me_client *me_cl = &dev->me_clients[i];
-                if (me_cl->client_id == cl->me_client_id) {
-                        if (me_cl->props.single_recv_buf != 0) {
-                                if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
-                                        return -EINVAL;
-                                dev->me_clients[i].mei_flow_ctrl_creds--;
-                        } else {
-                                if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
-                                        return -EINVAL;
-                                cl->mei_flow_ctrl_creds--;
-                        }
-                        return 0;
-                }
+        me_cl = &dev->me_clients[id];
+        if (me_cl->props.single_recv_buf != 0) {
+                if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+                        return -EINVAL;
+                me_cl->mei_flow_ctrl_creds--;
+        } else {
+                if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+                        return -EINVAL;
+                cl->mei_flow_ctrl_creds--;
         }
-        return -ENOENT;
+        return 0;
 }

 /**
@@ -652,7 +710,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
         i = mei_me_cl_by_id(dev, cl->me_client_id);
         if (i < 0) {
                 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
-                return -ENODEV;
+                return -ENOTTY;
         }

         cb = mei_io_cb_init(cl, NULL);
@@ -666,8 +724,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
                 goto err;

         cb->fop_type = MEI_FOP_READ;
-        if (dev->hbuf_is_ready) {
-                dev->hbuf_is_ready = false;
+        if (mei_hbuf_acquire(dev)) {
                 if (mei_hbm_cl_flow_control_req(dev, cl)) {
                         cl_err(dev, cl, "flow control send failed\n");
                         rets = -ENODEV;
@@ -687,27 +744,26 @@ err:
 }

 /**
- * mei_cl_irq_write_complete - write a message to device
+ * mei_cl_irq_write - write a message to device
  *      from the interrupt thread context
  *
  * @cl: client
  * @cb: callback block.
- * @slots: free slots.
  * @cmpl_list: complete list.
  *
  * returns 0, OK; otherwise error.
  */
-int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
-                              s32 *slots, struct mei_cl_cb *cmpl_list)
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+                     struct mei_cl_cb *cmpl_list)
 {
         struct mei_device *dev;
         struct mei_msg_data *buf;
         struct mei_msg_hdr mei_hdr;
         size_t len;
         u32 msg_slots;
+        int slots;
         int rets;

-
         if (WARN_ON(!cl || !cl->dev))
                 return -ENODEV;

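
[Illustrative note, not part of the diff] A hypothetical interrupt-thread caller, only to show the effect of the signature change: the free-slot count is no longer threaded through the caller, because mei_cl_irq_write() now reads it itself via mei_hbuf_empty_slots() (added in the next hunk). The loop, cmpl_list and the local variables are assumed.

        /* sketch only; the real caller lives in the interrupt handling code */
        list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
                cl = cb->cl;
                ret = mei_cl_irq_write(cl, cb, cmpl_list);
                if (ret)
                        return ret;
        }
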
@@ -724,6 +780,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
                 return 0;
         }

+        slots = mei_hbuf_empty_slots(dev);
         len = buf->size - cb->buf_idx;
         msg_slots = mei_data2slots(len);

@@ -732,13 +789,13 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
         mei_hdr.reserved = 0;
         mei_hdr.internal = cb->internal;

-        if (*slots >= msg_slots) {
+        if (slots >= msg_slots) {
                 mei_hdr.length = len;
                 mei_hdr.msg_complete = 1;
         /* Split the message only if we can write the whole host buffer */
-        } else if (*slots == dev->hbuf_depth) {
-                msg_slots = *slots;
-                len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+        } else if (slots == dev->hbuf_depth) {
+                msg_slots = slots;
+                len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                 mei_hdr.length = len;
                 mei_hdr.msg_complete = 0;
         } else {
@@ -749,7 +806,6 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
         cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
                 cb->request_buffer.size, cb->buf_idx);

-        *slots -= msg_slots;
         rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
         if (rets) {
                 cl->status = rets;
@@ -802,21 +858,29 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)


         cb->fop_type = MEI_FOP_WRITE;
+        cb->buf_idx = 0;
+        cl->writing_state = MEI_IDLE;
+
+        mei_hdr.host_addr = cl->host_client_id;
+        mei_hdr.me_addr = cl->me_client_id;
+        mei_hdr.reserved = 0;
+        mei_hdr.msg_complete = 0;
+        mei_hdr.internal = cb->internal;

         rets = mei_cl_flow_ctrl_creds(cl);
         if (rets < 0)
                 goto err;

-        /* Host buffer is not ready, we queue the request */
-        if (rets == 0 || !dev->hbuf_is_ready) {
-                cb->buf_idx = 0;
-                /* unseting complete will enqueue the cb for write */
-                mei_hdr.msg_complete = 0;
+        if (rets == 0) {
+                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+                rets = buf->size;
+                goto out;
+        }
+        if (!mei_hbuf_acquire(dev)) {
+                cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
                 rets = buf->size;
                 goto out;
         }
-
-        dev->hbuf_is_ready = false;

         /* Check for a maximum length */
         if (buf->size > mei_hbuf_max_len(dev)) {
@@ -827,12 +891,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
                 mei_hdr.msg_complete = 1;
         }

-        mei_hdr.host_addr = cl->host_client_id;
-        mei_hdr.me_addr = cl->me_client_id;
-        mei_hdr.reserved = 0;
-        mei_hdr.internal = cb->internal;
-
-
         rets = mei_write_message(dev, &mei_hdr, buf->data);
         if (rets)
                 goto err;
@@ -840,13 +898,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
         cl->writing_state = MEI_WRITING;
         cb->buf_idx = mei_hdr.length;

-        rets = buf->size;
 out:
         if (mei_hdr.msg_complete) {
-                if (mei_cl_flow_ctrl_reduce(cl)) {
-                        rets = -ENODEV;
+                rets = mei_cl_flow_ctrl_reduce(cl);
+                if (rets < 0)
                         goto err;
-                }
+
                 list_add_tail(&cb->list, &dev->write_waiting_list.list);
         } else {
                 list_add_tail(&cb->list, &dev->write_list.list);
@@ -856,15 +913,18 @@ out:
         if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

                 mutex_unlock(&dev->device_lock);
-                if (wait_event_interruptible(cl->tx_wait,
-                        cl->writing_state == MEI_WRITE_COMPLETE)) {
-                        if (signal_pending(current))
-                                rets = -EINTR;
-                        else
-                                rets = -ERESTARTSYS;
-                }
+                rets = wait_event_interruptible(cl->tx_wait,
+                                cl->writing_state == MEI_WRITE_COMPLETE);
                 mutex_lock(&dev->device_lock);
+                /* wait_event_interruptible returns -ERESTARTSYS */
+                if (rets) {
+                        if (signal_pending(current))
+                                rets = -EINTR;
+                        goto err;
+                }
         }
+
+        rets = buf->size;
 err:
         return rets;
 }
@@ -905,9 +965,9 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)

 void mei_cl_all_disconnect(struct mei_device *dev)
 {
-        struct mei_cl *cl, *next;
+        struct mei_cl *cl;

-        list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+        list_for_each_entry(cl, &dev->file_list, link) {
                 cl->state = MEI_FILE_DISCONNECTED;
                 cl->mei_flow_ctrl_creds = 0;
                 cl->timer_count = 0;
@@ -922,8 +982,8 @@ void mei_cl_all_disconnect(struct mei_device *dev)
  */
 void mei_cl_all_wakeup(struct mei_device *dev)
 {
-        struct mei_cl *cl, *next;
-        list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+        struct mei_cl *cl;
+        list_for_each_entry(cl, &dev->file_list, link) {
                 if (waitqueue_active(&cl->rx_wait)) {
                         cl_dbg(dev, cl, "Waking up reading client!\n");
                         wake_up_interruptible(&cl->rx_wait);
@@ -942,20 +1002,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
  */
 void mei_cl_all_write_clear(struct mei_device *dev)
 {
-        struct mei_cl_cb *cb, *next;
-        struct list_head *list;
-
-        list = &dev->write_list.list;
-        list_for_each_entry_safe(cb, next, list, list) {
-                list_del(&cb->list);
-                mei_io_cb_free(cb);
-        }
-
-        list = &dev->write_waiting_list.list;
-        list_for_each_entry_safe(cb, next, list, list) {
-                list_del(&cb->list);
-                mei_io_cb_free(cb);
-        }
+        mei_io_list_free(&dev->write_list, NULL);
+        mei_io_list_free(&dev->write_waiting_list, NULL);
 }