@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 			to_copy = size - bytes_copied;
 
 		if (is_iovec) {
-			struct iovec *iov = (struct iovec *)src;
+			struct msghdr *msg = (struct msghdr *)src;
 			int err;
 
 			/* The iovec will track bytes_copied internally. */
-			err = memcpy_fromiovec((u8 *)va + page_offset,
-					       iov, to_copy);
+			err = memcpy_from_msg((u8 *)va + page_offset,
+					      msg, to_copy);
 			if (err != 0) {
 				if (kernel_if->host)
 					kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
  */
 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
 				  u64 queue_offset,
-				  const void *src,
+				  const void *msg,
 				  size_t src_offset, size_t size)
 {
 
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
 	 * We ignore src_offset because src is really a struct iovec * and will
 	 * maintain offset internally.
 	 */
-	return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
 /*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
  * of bytes enqueued or < 0 on error.
  */
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-			  void *iov,
+			  struct msghdr *msg,
 			  size_t iov_size,
 			  int buf_type)
 {
 	ssize_t result;
 
-	if (!qpair || !iov)
+	if (!qpair)
 		return VMCI_ERROR_INVALID_ARGS;
 
 	qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
 	result = qp_enqueue_locked(qpair->produce_q,
 				   qpair->consume_q,
 				   qpair->produce_q_size,
-				   iov, iov_size,
+				   msg, iov_size,
 				   qp_memcpy_to_queue_iov);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
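
For reference, the point of this conversion is that a struct msghdr carries its own iovec and copy position, so memcpy_from_msg() can advance it internally and callers of vmci_qpair_enquev() can hand their msghdr straight down instead of unpacking an iovec first. Below is a minimal caller-side sketch under that assumption; the my_transport names are hypothetical and not part of this patch.

/* Illustrative sketch only: a sendmsg()-style path passing its
 * struct msghdr straight to the post-patch vmci_qpair_enquev().
 * "my_transport" and "my_transport_send" are hypothetical names.
 */
#include <linux/socket.h>
#include <linux/vmw_vmci_api.h>

struct my_transport {
	struct vmci_qp *qpair;
};

static ssize_t my_transport_send(struct my_transport *t,
				 struct msghdr *msg, size_t len)
{
	/* The msghdr tracks how much has been copied out of it, so no
	 * per-call offset bookkeeping is needed on the caller side.
	 */
	return vmci_qpair_enquev(t->qpair, msg, len, 0);
}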