
IB/rdmavt: Add Mem affinity support

Change verbs memory allocations to use the device's NUMA node.  This keeps
memory close to the device for optimal performance.

Reviewed-by: Dean Luick <dean.luick@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Mitko Haralanov, 9 years ago
Commit: d1b697b678

 drivers/infiniband/sw/rdmavt/mmap.c |  2 +-
 drivers/infiniband/sw/rdmavt/mr.c   |  2 +-
 drivers/infiniband/sw/rdmavt/qp.c   | 21 ++++++++++++---------
 3 files changed, 14 insertions(+), 11 deletions(-)
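
The pattern applied throughout the diff below is to replace node-agnostic allocators (kmalloc/kzalloc/vmalloc) with their *_node variants, passing the device's NUMA node from rdi->dparms.node. As a minimal, self-contained sketch of the same idea outside rdmavt (my_dev_priv, my_alloc_priv and my_free_priv are hypothetical names used only for illustration, not part of this patch):

/*
 * Minimal sketch of the NUMA-local allocation pattern used in this
 * patch, assuming a generic PCI device.  my_dev_priv, my_alloc_priv
 * and my_free_priv are illustrative names only.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

struct my_dev_priv {
	int node;	/* NUMA node the device is attached to */
	void *table;	/* large table kept close to the device */
};

static struct my_dev_priv *my_alloc_priv(struct pci_dev *pdev, size_t tab_size)
{
	/* NUMA node of the device; may be NUMA_NO_NODE if unknown */
	int node = dev_to_node(&pdev->dev);
	struct my_dev_priv *priv;

	/* small allocation: kzalloc_node(), mirroring the qp.c changes */
	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, node);
	if (!priv)
		return NULL;
	priv->node = node;

	/* large, virtually contiguous allocation: vmalloc_node(),
	 * mirroring the lkey_table change in mr.c */
	priv->table = vmalloc_node(tab_size, node);
	if (!priv->table) {
		kfree(priv);
		return NULL;
	}
	return priv;
}

static void my_free_priv(struct my_dev_priv *priv)
{
	vfree(priv->table);
	kfree(priv);
}

When node is NUMA_NO_NODE, the *_node variants behave like their plain counterparts, so the pattern remains safe on non-NUMA systems.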

--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -157,7 +157,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
 {
 	struct rvt_mmap_info *ip;
 
-	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
 	if (!ip)
 		return ip;
 

--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -87,7 +87,7 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
 	}
 	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
 	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
-			       vmalloc(lk_tab_size);
+			       vmalloc_node(lk_tab_size, rdi->dparms.node);
 	if (!rdi->lkey_table.table)
 		return -ENOMEM;
 

--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -186,7 +186,8 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
 		return -EINVAL;
 
 	/* allocate parent object */
-	rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
+	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
+				   rdi->dparms.node);
 	if (!rdi->qp_dev)
 		return -ENOMEM;
 
@@ -194,9 +195,9 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
 	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
 	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
 	rdi->qp_dev->qp_table =
-		kmalloc(rdi->qp_dev->qp_table_size *
-			sizeof(*rdi->qp_dev->qp_table),
-			GFP_KERNEL);
+		kmalloc_node(rdi->qp_dev->qp_table_size *
+			     sizeof(*rdi->qp_dev->qp_table),
+			     GFP_KERNEL, rdi->dparms.node);
 	if (!rdi->qp_dev->qp_table)
 		goto no_qp_table;
 
@@ -542,8 +543,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 				(init_attr->cap.max_send_wr + 1) * sz,
 				gfp, PAGE_KERNEL);
 		else
-			swq = vmalloc(
-				(init_attr->cap.max_send_wr + 1) * sz);
+			swq = vmalloc_node(
+				(init_attr->cap.max_send_wr + 1) * sz,
+				rdi->dparms.node);
 		if (!swq)
 			return ERR_PTR(-ENOMEM);
 
@@ -558,7 +560,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		} else if (init_attr->cap.max_recv_sge > 1)
 			sg_list_sz = sizeof(*qp->r_sg_list) *
 				(init_attr->cap.max_recv_sge - 1);
-		qp = kzalloc(sz + sg_list_sz, gfp);
+		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
 		if (!qp)
 			goto bail_swq;
 
@@ -592,9 +594,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 						qp->r_rq.size * sz,
 						gfp, PAGE_KERNEL);
 			else
-				qp->r_rq.wq = vmalloc(
+				qp->r_rq.wq = vmalloc_node(
 						sizeof(struct rvt_rwq) +
-						qp->r_rq.size * sz);
+						qp->r_rq.size * sz,
+						rdi->dparms.node);
 			if (!qp->r_rq.wq)
 				goto bail_driver_priv;
 		}
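
All three files rely on rdi->dparms.node being filled in by the underlying driver before these init paths run. A hedged sketch of how a driver might publish its device's NUMA node (my_set_rvt_node() is an illustrative helper, not part of this patch; dev_to_node() and numa_node_id() are standard kernel helpers):

#include <linux/device.h>
#include <linux/topology.h>
#include <rdma/rdma_vt.h>

/* Illustrative only: pick the NUMA node rdmavt should allocate on. */
static void my_set_rvt_node(struct rvt_dev_info *rdi, struct device *dev)
{
	int node = dev_to_node(dev);	/* NUMA_NO_NODE if not known */

	rdi->dparms.node = (node == NUMA_NO_NODE) ? numa_node_id() : node;
}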