IB/core: Fix user mode post wr corruption

Commit e622f2f4ad21 ("IB: split struct ib_send_wr")
introduced a regression for HCAs whose user mode post
sends go through ib_uverbs_post_send().

The code didn't account for the fact that the first sge is
offset by an operation-dependent length.  The allocation did,
but the pointer to the destination sge list is computed without
that knowledge.  The sge list copy_from_user() then corrupts
fields in the work request.

Store the operation-dependent length in a local variable and
compute the sge list copy_from_user() destination using that length.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Mike Marciniszyn, 9 years ago
Parent
Commit 1d784b890c
1 changed file with 10 additions and 5 deletions

+10 −5  drivers/infiniband/core/uverbs_cmd.c
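
To make the layout concrete, here is a minimal user-space C sketch of the pattern described above. The structs (sge, base_wr, ud_wr) and alloc_wr_like() are illustrative stand-ins, not the kernel definitions; they only mirror the alloc_wr() layout in which the sge array sits immediately after the operation-specific work request, so sg_list must be offset by that operation's size rather than by the base sizeof(*next).

/* Stand-alone sketch of the layout issue; the structs and alloc_wr_like()
 * are illustrative stand-ins, not the kernel's ib_* definitions. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

struct sge     { uint64_t addr; uint32_t length; uint32_t lkey; };
struct base_wr { struct base_wr *next; uint64_t wr_id; int num_sge; struct sge *sg_list; };
struct ud_wr   { struct base_wr wr; void *ah; uint32_t remote_qpn; uint32_t remote_qkey; };

/* Mirrors the alloc_wr() pattern: one buffer holding the work request
 * followed by room for num_sge scatter/gather entries. */
static void *alloc_wr_like(size_t wr_size, unsigned int num_sge)
{
	return malloc(ALIGN_UP(wr_size, sizeof(struct sge)) +
		      num_sge * sizeof(struct sge));
}

int main(void)
{
	size_t next_size = sizeof(struct ud_wr);   /* operation-dependent length */
	struct ud_wr *ud = alloc_wr_like(next_size, 2);
	struct base_wr *next;

	if (!ud)
		return 1;
	next = &ud->wr;

	/* Buggy offset: uses the base WR size only, so copying sges here
	 * would overwrite ud->ah, ud->remote_qpn and ud->remote_qkey. */
	char *bad  = (char *)next + ALIGN_UP(sizeof(*next), sizeof(struct sge));
	/* Fixed offset: uses the size that was actually allocated. */
	char *good = (char *)next + ALIGN_UP(next_size, sizeof(struct sge));

	printf("ud_wr spans %zu bytes; buggy sg_list offset %td, fixed offset %td\n",
	       sizeof(*ud), bad - (char *)ud, good - (char *)ud);
	free(ud);
	return 0;
}

With typical 64-bit layout, the buggy offset lands inside ud_wr's trailing fields, so the subsequent sge copy would overwrite them; the fixed offset starts just past the structure, which is what the patch ensures.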

@@ -2446,6 +2446,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	int                             i, sg_ind;
 	int				is_ud;
 	ssize_t                         ret = -EINVAL;
+	size_t                          next_size;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -2490,7 +2491,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 				goto out_put;
 			}
 
-			ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+			next_size = sizeof(*ud);
+			ud = alloc_wr(next_size, user_wr->num_sge);
 			if (!ud) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2511,7 +2513,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_RDMA_READ) {
 			struct ib_rdma_wr *rdma;
 
-			rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+			next_size = sizeof(*rdma);
+			rdma = alloc_wr(next_size, user_wr->num_sge);
 			if (!rdma) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2525,7 +2528,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
 			struct ib_atomic_wr *atomic;
 
-			atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+			next_size = sizeof(*atomic);
+			atomic = alloc_wr(next_size, user_wr->num_sge);
 			if (!atomic) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2540,7 +2544,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		} else if (user_wr->opcode == IB_WR_SEND ||
 			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
 			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
-			next = alloc_wr(sizeof(*next), user_wr->num_sge);
+			next_size = sizeof(*next);
+			next = alloc_wr(next_size, user_wr->num_sge);
 			if (!next) {
 				ret = -ENOMEM;
 				goto out_put;
@@ -2572,7 +2577,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 
 		if (next->num_sge) {
 			next->sg_list = (void *) next +
-				ALIGN(sizeof *next, sizeof (struct ib_sge));
+				ALIGN(next_size, sizeof(struct ib_sge));
 			if (copy_from_user(next->sg_list,
 					   buf + sizeof cmd +
 					   cmd.wr_count * cmd.wqe_size +