
Merge branch 'mellanox' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
Doug Ledford, 8 years ago
parent
commit
a1139697ad
35 changed files with 330 additions and 285 deletions
  1. drivers/infiniband/core/cache.c (+8, -15)
  2. drivers/infiniband/core/cma.c (+2, -2)
  3. drivers/infiniband/core/device.c (+2, -6)
  4. drivers/infiniband/core/sa_query.c (+1, -2)
  5. drivers/infiniband/core/uverbs_cmd.c (+51, -76)
  6. drivers/infiniband/core/uverbs_main.c (+1, -12)
  7. drivers/infiniband/core/verbs.c (+12, -22)
  8. drivers/infiniband/hw/mlx4/alias_GUID.c (+1, -1)
  9. drivers/infiniband/hw/mlx4/cq.c (+1, -1)
  10. drivers/infiniband/hw/mlx4/mcg.c (+4, -5)
  11. drivers/infiniband/hw/mlx4/qp.c (+11, -11)
  12. drivers/infiniband/hw/mlx5/cq.c (+1, -1)
  13. drivers/infiniband/hw/mlx5/mad.c (+1, -1)
  14. drivers/infiniband/hw/mlx5/main.c (+75, -11)
  15. drivers/infiniband/hw/mlx5/mlx5_ib.h (+1, -1)
  16. drivers/infiniband/hw/mlx5/mr.c (+66, -55)
  17. drivers/infiniband/hw/mlx5/qp.c (+5, -0)
  18. drivers/infiniband/hw/mthca/mthca_cmd.c (+1, -1)
  19. drivers/infiniband/hw/nes/nes_verbs.c (+0, -5)
  20. drivers/infiniband/hw/usnic/usnic_fwd.c (+2, -10)
  21. drivers/infiniband/hw/usnic/usnic_fwd.h (+1, -1)
  22. drivers/infiniband/hw/usnic/usnic_ib_main.c (+6, -4)
  23. drivers/infiniband/sw/rxe/rxe_hw_counters.c (+1, -1)
  24. drivers/infiniband/ulp/ipoib/ipoib.h (+1, -0)
  25. drivers/infiniband/ulp/ipoib/ipoib_cm.c (+7, -1)
  26. drivers/infiniband/ulp/ipoib/ipoib_main.c (+6, -9)
  27. drivers/infiniband/ulp/ipoib/ipoib_vlan.c (+18, -4)
  28. drivers/infiniband/ulp/iser/iser_verbs.c (+2, -4)
  29. drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c (+1, -6)
  30. drivers/infiniband/ulp/srpt/ib_srpt.c (+2, -3)
  31. include/linux/mlx5/driver.h (+1, -1)
  32. include/linux/mlx5/mlx5_ifc.h (+8, -4)
  33. include/rdma/ib_verbs.h (+5, -5)
  34. include/uapi/rdma/mlx4-abi.h (+2, -4)
  35. include/uapi/rdma/mlx5-abi.h (+23, -0)

+ 8 - 15
drivers/infiniband/core/cache.c

@@ -1199,30 +1199,23 @@ int ib_cache_setup_one(struct ib_device *device)
 	device->cache.ports =
 		kzalloc(sizeof(*device->cache.ports) *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	if (!device->cache.ports) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!device->cache.ports)
+		return -ENOMEM;
 
 	err = gid_table_setup_one(device);
-	if (err)
-		goto out;
+	if (err) {
+		kfree(device->cache.ports);
+		device->cache.ports = NULL;
+		return err;
+	}
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device), true);
 
 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
 			      device, ib_cache_event);
-	err = ib_register_event_handler(&device->cache.event_handler);
-	if (err)
-		goto err;
-
+	ib_register_event_handler(&device->cache.event_handler);
 	return 0;
-
-err:
-	gid_table_cleanup_one(device);
-out:
-	return err;
 }
 
 void ib_cache_release_one(struct ib_device *device)

+ 2 - 2
drivers/infiniband/core/cma.c

@@ -72,7 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
-#define CMA_PREFERRED_ROCE_GID_TYPE (1 << IB_GID_TYPE_ROCE_UDP_ENCAP)
+#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
 
 static const char * const cma_events[] = {
 	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
@@ -4282,7 +4282,7 @@ static void cma_add_one(struct ib_device *device)
 	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
 		supported_gids = roce_gid_type_mask_support(device, i);
 		WARN_ON(!supported_gids);
-		if (supported_gids & CMA_PREFERRED_ROCE_GID_TYPE)
+		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
 				CMA_PREFERRED_ROCE_GID_TYPE;
 		else

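Aside (illustration only, not part of this patch): the macro now names the GID type itself rather than a mask, so callers shift it when testing a per-port support bitmask. A minimal sketch of that idiom, assuming a mask in which bit N corresponds to enum ib_gid_type value N:

	/* Illustration only: choose a default RoCE GID type from a support mask
	 * where bit N corresponds to enum ib_gid_type value N. */
	static enum ib_gid_type pick_default_gid_type(unsigned long supported_gids)
	{
		if (supported_gids & (1 << IB_GID_TYPE_ROCE_UDP_ENCAP))
			return IB_GID_TYPE_ROCE_UDP_ENCAP;	/* prefer RoCE v2 */
		return IB_GID_TYPE_ROCE;			/* fall back to RoCE v1 */
	}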
+ 2 - 6
drivers/infiniband/core/device.c

@@ -747,7 +747,7 @@ EXPORT_SYMBOL(ib_set_client_data);
  * chapter 11 of the InfiniBand Architecture Specification).  This
  * callback may occur in interrupt context.
  */
-int ib_register_event_handler  (struct ib_event_handler *event_handler)
+void ib_register_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;
 
@@ -755,8 +755,6 @@ int ib_register_event_handler  (struct ib_event_handler *event_handler)
 	list_add_tail(&event_handler->list,
 		      &event_handler->device->event_handler_list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_register_event_handler);
 
@@ -767,15 +765,13 @@ EXPORT_SYMBOL(ib_register_event_handler);
  * Unregister an event handler registered with
  * ib_register_event_handler().
  */
-int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+void ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
 	list_del(&event_handler->list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_unregister_event_handler);
 

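Aside (illustration only, not part of this patch): with ib_register_event_handler() and ib_unregister_event_handler() now returning void, client registration paths drop their error handling. A hedged sketch of a hypothetical client (my_event_handler and my_event_cb are made-up names):

	/* Hypothetical client, illustration only: registration can no longer fail. */
	static struct ib_event_handler my_event_handler;

	static void my_client_add_one(struct ib_device *device)
	{
		INIT_IB_EVENT_HANDLER(&my_event_handler, device, my_event_cb);
		ib_register_event_handler(&my_event_handler);	/* void, no error path */
	}

	static void my_client_remove_one(struct ib_device *device, void *client_data)
	{
		ib_unregister_event_handler(&my_event_handler);	/* void as well */
	}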
+ 1 - 2
drivers/infiniband/core/sa_query.c

@@ -2417,8 +2417,7 @@ static void ib_sa_add_one(struct ib_device *device)
 	 */
 
 	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
-	if (ib_register_event_handler(&sa_dev->event_handler))
-		goto err;
+	ib_register_event_handler(&sa_dev->event_handler);
 
 	for (i = 0; i <= e - s; ++i) {
 		if (rdma_cap_ib_sa(device, i + 1))

+ 51 - 76
drivers/infiniband/core/uverbs_cmd.c

@@ -1820,6 +1820,28 @@ err_put:
 	return ret;
 }
 
+static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
+				   struct rdma_ah_attr *rdma_attr)
+{
+	const struct ib_global_route   *grh;
+
+	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
+	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
+	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
+	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
+	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
+					 IB_AH_GRH);
+	if (uverb_attr->is_global) {
+		grh = rdma_ah_read_grh(rdma_attr);
+		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
+		uverb_attr->flow_label        = grh->flow_label;
+		uverb_attr->sgid_index        = grh->sgid_index;
+		uverb_attr->hop_limit         = grh->hop_limit;
+		uverb_attr->traffic_class     = grh->traffic_class;
+	}
+	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
+}
+
 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 			   struct ib_device *ib_dev,
 			   const char __user *buf, int in_len,
@@ -1830,7 +1852,6 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	struct ib_qp                   *qp;
 	struct ib_qp_attr              *attr;
 	struct ib_qp_init_attr         *init_attr;
-	const struct ib_global_route   *grh;
 	int                            ret;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1880,39 +1901,8 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.alt_port_num           = attr->alt_port_num;
 	resp.alt_timeout            = attr->alt_timeout;
 
-	resp.dest.dlid              = rdma_ah_get_dlid(&attr->ah_attr);
-	resp.dest.sl                = rdma_ah_get_sl(&attr->ah_attr);
-	resp.dest.src_path_bits     = rdma_ah_get_path_bits(&attr->ah_attr);
-	resp.dest.static_rate       = rdma_ah_get_static_rate(&attr->ah_attr);
-	resp.dest.is_global         = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
-					 IB_AH_GRH);
-	if (resp.dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->ah_attr);
-		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
-		resp.dest.flow_label        = grh->flow_label;
-		resp.dest.sgid_index        = grh->sgid_index;
-		resp.dest.hop_limit         = grh->hop_limit;
-		resp.dest.traffic_class     = grh->traffic_class;
-	}
-	resp.dest.port_num          = rdma_ah_get_port_num(&attr->ah_attr);
-
-	resp.alt_dest.dlid          = rdma_ah_get_dlid(&attr->alt_ah_attr);
-	resp.alt_dest.sl            = rdma_ah_get_sl(&attr->alt_ah_attr);
-	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
-	resp.alt_dest.static_rate
-			= rdma_ah_get_static_rate(&attr->alt_ah_attr);
-	resp.alt_dest.is_global
-			= !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
-						  IB_AH_GRH);
-	if (resp.alt_dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
-		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
-		resp.alt_dest.flow_label    = grh->flow_label;
-		resp.alt_dest.sgid_index    = grh->sgid_index;
-		resp.alt_dest.hop_limit     = grh->hop_limit;
-		resp.alt_dest.traffic_class = grh->traffic_class;
-	}
-	resp.alt_dest.port_num      = rdma_ah_get_port_num(&attr->alt_ah_attr);
+	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
+	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
 
 	resp.max_send_wr            = init_attr->cap.max_send_wr;
 	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
@@ -1946,6 +1936,29 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }
 
+static void copy_ah_attr_from_uverbs(struct ib_device *dev,
+				     struct rdma_ah_attr *rdma_attr,
+				     struct ib_uverbs_qp_dest *uverb_attr)
+{
+	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
+	if (uverb_attr->is_global) {
+		rdma_ah_set_grh(rdma_attr, NULL,
+				uverb_attr->flow_label,
+				uverb_attr->sgid_index,
+				uverb_attr->hop_limit,
+				uverb_attr->traffic_class);
+		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
+	} else {
+		rdma_ah_set_ah_flags(rdma_attr, 0);
+	}
+	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
+	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
+	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
+	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
+	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
+	rdma_ah_set_make_grd(rdma_attr, false);
+}
+
 static int modify_qp(struct ib_uverbs_file *file,
 		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
@@ -1993,50 +2006,12 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->rate_limit	  = cmd->rate_limit;
 
 	if (cmd->base.attr_mask & IB_QP_AV)
-		attr->ah_attr.type = rdma_ah_find_type(qp->device,
-						       cmd->base.dest.port_num);
-	if (cmd->base.dest.is_global) {
-		rdma_ah_set_grh(&attr->ah_attr, NULL,
-				cmd->base.dest.flow_label,
-				cmd->base.dest.sgid_index,
-				cmd->base.dest.hop_limit,
-				cmd->base.dest.traffic_class);
-		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
-	} else {
-		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
-	}
-	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
-	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
-	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
-	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
-	rdma_ah_set_port_num(&attr->ah_attr,
-			     cmd->base.dest.port_num);
-	rdma_ah_set_make_grd(&attr->ah_attr, false);
+		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
+					 &cmd->base.dest);
 
 	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
-		attr->alt_ah_attr.type =
-			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
-	if (cmd->base.alt_dest.is_global) {
-		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
-				cmd->base.alt_dest.flow_label,
-				cmd->base.alt_dest.sgid_index,
-				cmd->base.alt_dest.hop_limit,
-				cmd->base.alt_dest.traffic_class);
-		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
-				     cmd->base.alt_dest.dgid);
-	} else {
-		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
-	}
-
-	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
-	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
-	rdma_ah_set_path_bits(&attr->alt_ah_attr,
-			      cmd->base.alt_dest.src_path_bits);
-	rdma_ah_set_static_rate(&attr->alt_ah_attr,
-				cmd->base.alt_dest.static_rate);
-	rdma_ah_set_port_num(&attr->alt_ah_attr,
-			     cmd->base.alt_dest.port_num);
-	rdma_ah_set_make_grd(&attr->alt_ah_attr, false);
+		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
+					 &cmd->base.alt_dest);
 
 	ret = ib_modify_qp_with_udata(qp, attr,
 				      modify_qp_mask(qp->qp_type,

+ 1 - 12
drivers/infiniband/core/uverbs_main.c

@@ -595,7 +595,6 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 {
 	struct ib_uverbs_async_event_file *ev_file;
 	struct file *filp;
-	int ret;
 
 	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
 	if (!ev_file)
@@ -621,21 +620,11 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
 			      ib_dev,
 			      ib_uverbs_event_handler);
-	ret = ib_register_event_handler(&uverbs_file->event_handler);
-	if (ret)
-		goto err_put_file;
-
+	ib_register_event_handler(&uverbs_file->event_handler);
 	/* At that point async file stuff was fully set */
 
 	return filp;
 
-err_put_file:
-	fput(filp);
-	kref_put(&uverbs_file->async_file->ref,
-		 ib_uverbs_release_async_event_file);
-	uverbs_file->async_file = NULL;
-	return ERR_PTR(ret);
-
 err_put_refs:
 	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
 	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);

+ 12 - 22
drivers/infiniband/core/verbs.c

@@ -180,39 +180,29 @@ EXPORT_SYMBOL(ib_rate_to_mbps);
 __attribute_const__ enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type)
 {
-	switch (node_type) {
-	case RDMA_NODE_IB_CA:
-	case RDMA_NODE_IB_SWITCH:
-	case RDMA_NODE_IB_ROUTER:
-		return RDMA_TRANSPORT_IB;
-	case RDMA_NODE_RNIC:
-		return RDMA_TRANSPORT_IWARP;
-	case RDMA_NODE_USNIC:
+
+	if (node_type == RDMA_NODE_USNIC)
 		return RDMA_TRANSPORT_USNIC;
-	case RDMA_NODE_USNIC_UDP:
+	if (node_type == RDMA_NODE_USNIC_UDP)
 		return RDMA_TRANSPORT_USNIC_UDP;
-	default:
-		BUG();
-		return 0;
-	}
+	if (node_type == RDMA_NODE_RNIC)
+		return RDMA_TRANSPORT_IWARP;
+
+	return RDMA_TRANSPORT_IB;
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
 {
+	enum rdma_transport_type lt;
 	if (device->get_link_layer)
 		return device->get_link_layer(device, port_num);
 
-	switch (rdma_node_get_transport(device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	lt = rdma_node_get_transport(device->node_type);
+	if (lt == RDMA_TRANSPORT_IB)
 		return IB_LINK_LAYER_INFINIBAND;
-	case RDMA_TRANSPORT_IWARP:
-	case RDMA_TRANSPORT_USNIC:
-	case RDMA_TRANSPORT_USNIC_UDP:
-		return IB_LINK_LAYER_ETHERNET;
-	default:
-		return IB_LINK_LAYER_UNSPECIFIED;
-	}
+
+	return IB_LINK_LAYER_ETHERNET;
 }
 EXPORT_SYMBOL(rdma_port_get_link_layer);
 

+ 1 - 1
drivers/infiniband/hw/mlx4/alias_GUID.c

@@ -781,7 +781,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
-		/* If there is pending one should cancell then run, otherwise
+		/* If there is pending one should cancel then run, otherwise
 		  * won't run till previous one is ended as same work
 		  * struct is used.
 		  */

+ 1 - 1
drivers/infiniband/hw/mlx4/cq.c

@@ -637,7 +637,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
 	struct mlx4_ib_qp *qp;
 
 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun
+	/* Find uncompleted WQEs belonging to that cq and return
 	 * simulated FLUSH_ERR completions
 	 */
 	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {

+ 4 - 5
drivers/infiniband/hw/mlx4/mcg.c

@@ -808,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
 		struct device_attribute *attr, char *buf);
 
 static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
-					 union ib_gid *mgid, int create,
-					 gfp_t gfp_mask)
+					 union ib_gid *mgid, int create)
 {
 	struct mcast_group *group, *cur_group;
 	int is_mgid0;
@@ -825,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
 	if (!create)
 		return ERR_PTR(-ENOENT);
 
-	group = kzalloc(sizeof *group, gfp_mask);
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group)
 		return ERR_PTR(-ENOMEM);
 
@@ -892,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
 	case IB_MGMT_METHOD_GET_RESP:
 	case IB_SA_METHOD_DELETE_RESP:
 		mutex_lock(&ctx->mcg_table_lock);
-		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+		group = acquire_group(ctx, &rec->mgid, 0);
 		mutex_unlock(&ctx->mcg_table_lock);
 		if (IS_ERR(group)) {
 			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -954,7 +953,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 		req->sa_mad = *sa_mad;
 
 		mutex_lock(&ctx->mcg_table_lock);
-		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+		group = acquire_group(ctx, &rec->mgid, may_create);
 		mutex_unlock(&ctx->mcg_table_lock);
 		if (IS_ERR(group)) {
 			kfree(req);

+ 11 - 11
drivers/infiniband/hw/mlx4/qp.c

@@ -748,7 +748,7 @@ static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);
 
-	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_ETHERTYPE;
+	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
 	qp->state = IB_QPS_RESET;
 
 	/* Set dummy send resources to be compatible with HV and PRM */
@@ -812,6 +812,9 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
 		return ERR_PTR(-EFAULT);
 	}
 
+	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (ucmd.comp_mask || ucmd.reserved1)
 		return ERR_PTR(-EOPNOTSUPP);
 
@@ -1046,9 +1049,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (src == MLX4_IB_RWQ_SRC) {
-			if (ucmd.wq.comp_mask || ucmd.wq.reserved1 ||
-			    ucmd.wq.reserved[0] || ucmd.wq.reserved[1] ||
-			    ucmd.wq.reserved[2]) {
+			if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
+			    ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
 				pr_debug("user command isn't supported\n");
 				err = -EOPNOTSUPP;
 				goto err;
@@ -2027,8 +2029,8 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
  */
 static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
 {
+	int err = 0;
 	int i;
-	int err;
 
 	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
 		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
@@ -2723,19 +2725,17 @@ enum {
 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			      int attr_mask, struct ib_udata *udata)
 {
+	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-	int ll;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ll = IB_LINK_LAYER_UNSPECIFIED;
-	} else {
+	if (cur_state != new_state || cur_state != IB_QPS_RESET) {
 		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
 	}
@@ -4146,8 +4146,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 	if (!(udata && pd->uobject))
 		return ERR_PTR(-EINVAL);
 
-	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
-			  sizeof(ucmd.reserved);
+	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+			  sizeof(ucmd.comp_mask);
 	if (udata->inlen < required_cmd_sz) {
 		pr_debug("invalid inlen\n");
 		return ERR_PTR(-EINVAL);

+ 1 - 1
drivers/infiniband/hw/mlx5/cq.c

@@ -499,7 +499,7 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
 	struct mlx5_ib_qp *qp;
 
 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun mmics ones */
+	/* Find uncompleted WQEs belonging to that cq and return mmics ones */
 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
 		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
 		if (*npolled >= num_entries)

+ 1 - 1
drivers/infiniband/hw/mlx5/mad.c

@@ -204,7 +204,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 	int err;
 	void *out_cnt;
 
-	/* Decalring support of extended counters */
+	/* Declaring support of extended counters */
 	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
 		struct ib_class_port_info cpi = {};
 

+ 75 - 11
drivers/infiniband/hw/mlx5/main.c

@@ -802,8 +802,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 
 	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
 			uhw->outlen)) {
-		resp.mlx5_ib_support_multi_pkt_send_wqes =
-			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+			resp.mlx5_ib_support_multi_pkt_send_wqes =
+				MLX5_IB_ALLOW_MPW;
+
+		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+			resp.mlx5_ib_support_multi_pkt_send_wqes |=
+				MLX5_IB_SUPPORT_EMPW;
+
 		resp.response_length +=
 			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
 	}
@@ -811,6 +817,27 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	if (field_avail(typeof(resp), reserved, uhw->outlen))
 		resp.response_length += sizeof(resp.reserved);
 
+	if (field_avail(typeof(resp), sw_parsing_caps,
+			uhw->outlen)) {
+		resp.response_length += sizeof(resp.sw_parsing_caps);
+		if (MLX5_CAP_ETH(mdev, swp)) {
+			resp.sw_parsing_caps.sw_parsing_offloads |=
+				MLX5_IB_SW_PARSING;
+
+			if (MLX5_CAP_ETH(mdev, swp_csum))
+				resp.sw_parsing_caps.sw_parsing_offloads |=
+					MLX5_IB_SW_PARSING_CSUM;
+
+			if (MLX5_CAP_ETH(mdev, swp_lso))
+				resp.sw_parsing_caps.sw_parsing_offloads |=
+					MLX5_IB_SW_PARSING_LSO;
+
+			if (resp.sw_parsing_caps.sw_parsing_offloads)
+				resp.sw_parsing_caps.supported_qpts =
+					BIT(IB_QPT_RAW_PACKET);
+		}
+	}
+
 	if (uhw->outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
@@ -2104,7 +2131,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
  * it won't fall into the multicast flow steering table and this rule
  * could steal other multicast packets.
  */
-static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
 {
 	union ib_flow_spec *flow_spec;
 
@@ -2316,10 +2343,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 	return err ? ERR_PTR(err) : prio;
 }
 
-static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
-						     struct mlx5_ib_flow_prio *ft_prio,
-						     const struct ib_flow_attr *flow_attr,
-						     struct mlx5_flow_destination *dst)
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+			    struct mlx5_flow_spec *spec,
+			    u32 underlay_qpn)
+{
+	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+					   spec->match_criteria,
+					   misc_parameters);
+	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+					   misc_parameters);
+
+	if (underlay_qpn &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+				      ft_field_support.bth_dst_qp)) {
+		MLX5_SET(fte_match_set_misc,
+			 misc_params_v, bth_dst_qp, underlay_qpn);
+		MLX5_SET(fte_match_set_misc,
+			 misc_params_c, bth_dst_qp, 0xffffff);
+	}
+}
+
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+						      struct mlx5_ib_flow_prio *ft_prio,
+						      const struct ib_flow_attr *flow_attr,
+						      struct mlx5_flow_destination *dst,
+						      u32 underlay_qpn)
 {
 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
@@ -2355,6 +2403,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
 	}
 
+	if (!flow_is_multicast_only(flow_attr))
+		set_underlay_qp(dev, spec, underlay_qpn);
+
 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
 	if (is_drop) {
 		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -2394,6 +2445,14 @@ free:
 	return err ? ERR_PTR(err) : handler;
 }
 
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+						     struct mlx5_ib_flow_prio *ft_prio,
+						     const struct ib_flow_attr *flow_attr,
+						     struct mlx5_flow_destination *dst)
+{
+	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+}
+
 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
 							  struct mlx5_ib_flow_prio *ft_prio,
 							  struct ib_flow_attr *flow_attr,
@@ -2530,6 +2589,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
 	struct mlx5_ib_flow_prio *ft_prio;
 	int err;
+	int underlay_qpn;
 
 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
 		return ERR_PTR(-ENOMEM);
@@ -2570,8 +2630,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 			handler = create_dont_trap_rule(dev, ft_prio,
 							flow_attr, dst);
 		} else {
-			handler = create_flow_rule(dev, ft_prio, flow_attr,
-						   dst);
+			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
+					mqp->underlay_qpn : 0;
+			handler = _create_flow_rule(dev, ft_prio, flow_attr,
+						    dst, underlay_qpn);
 		}
 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3793,6 +3855,8 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
 	if (!dbg->timeout_debugfs)
 		goto out_debugfs;
 
+	dev->delay_drop.dbg = dbg;
+
 	return 0;
 
 out_debugfs:
@@ -3817,8 +3881,8 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }
 
-const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
-		int comp_vector)
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 

+ 1 - 1
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -503,7 +503,7 @@ struct mlx5_ib_mr {
 	struct mlx5_shared_mr_info	*smr_info;
 	struct list_head	list;
 	int			order;
-	int			umred;
+	bool			allocated_from_cache;
 	int			npages;
 	struct mlx5_ib_dev     *dev;
 	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];

+ 66 - 55
drivers/infiniband/hw/mlx5/mr.c

@@ -48,8 +48,7 @@ enum {
 #define MLX5_UMR_ALIGN 2048
 
 static int clean_mr(struct mlx5_ib_mr *mr);
-static int max_umr_order(struct mlx5_ib_dev *dev);
-static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -184,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr->order = ent->order;
-		mr->umred = 1;
+		mr->allocated_from_cache = 1;
 		mr->dev = dev;
 
 		MLX5_SET(mkc, mkc, free, 1);
@@ -497,7 +496,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 	int i;
 
 	c = order2idx(dev, order);
-	last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
+	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
 	if (c < 0 || c > last_umr_cache_entry) {
 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
 		return NULL;
@@ -677,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		queue_work(cache->wq, &ent->work);
 
-		if (i > MAX_UMR_CACHE_ENTRY) {
+		if (i > MR_CACHE_LAST_STD_ENTRY) {
 			mlx5_odp_init_mr_cache_entry(ent);
 			continue;
 		}
 
-		if (!use_umr(dev, ent->order))
+		if (ent->order > mr_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -809,28 +808,24 @@ err_free:
 	return ERR_PTR(err);
 }
 
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
 {
+	u64 page_size = 1ULL << page_shift;
 	u64 offset;
 	int npages;
 
 	offset = addr & (page_size - 1);
-	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+	npages = ALIGN(len + offset, page_size) >> page_shift;
 	return (npages + 1) / 2;
 }
 
-static int max_umr_order(struct mlx5_ib_dev *dev)
+static int mr_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MAX_UMR_CACHE_ENTRY + 2;
+		return MR_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static int use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= max_umr_order(dev);
-}
-
 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		       int access_flags, struct ib_umem **umem,
 		       int *npages, int *page_shift, int *ncont,
@@ -904,7 +899,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
 	return err;
 }
 
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
+static struct mlx5_ib_mr *alloc_mr_from_cache(
+				  struct ib_pd *pd, struct ib_umem *umem,
 				  u64 virt_addr, u64 len, int npages,
 				  int page_shift, int order, int access_flags)
 {
@@ -936,16 +932,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	mr->mmkey.size = len;
 	mr->mmkey.pd = to_mpd(pd)->pdn;
 
-	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
-				 MLX5_IB_UPD_XLT_ENABLE);
-
-	if (err) {
-		mlx5_mr_cache_free(dev, mr);
-		return ERR_PTR(err);
-	}
-
-	mr->live = 1;
-
 	return mr;
 }
 
@@ -1111,7 +1097,8 @@ free_xlt:
 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 				     u64 virt_addr, u64 length,
 				     struct ib_umem *umem, int npages,
-				     int page_shift, int access_flags)
+				     int page_shift, int access_flags,
+				     bool populate)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr;
@@ -1126,15 +1113,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
-		sizeof(*pas) * ((npages + 1) / 2) * 2;
+	mr->ibmr.pd = pd;
+	mr->access_flags = access_flags;
+
+	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	if (populate)
+		inlen += sizeof(*pas) * roundup(npages, 2);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
 	}
 	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-	if (!(access_flags & IB_ACCESS_ON_DEMAND))
+	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
 		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 
@@ -1143,23 +1134,27 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
 	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
 	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, umr_en, 1);
 
 	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
 	MLX5_SET64(mkc, mkc, len, length);
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
 	MLX5_SET(mkc, mkc, translations_octword_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+		 get_octo_len(virt_addr, length, page_shift));
 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
-		 get_octo_len(virt_addr, length, 1 << page_shift));
+	if (populate) {
+		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+			 get_octo_len(virt_addr, length, page_shift));
+	}
 
 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err) {
@@ -1168,9 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	mr->desc_size = sizeof(struct mlx5_mtt);
-	mr->umem = umem;
 	mr->dev = dev;
-	mr->live = 1;
 	kvfree(in);
 
 	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1210,6 +1203,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int ncont;
 	int order;
 	int err;
+	bool use_umr = true;
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
@@ -1228,27 +1222,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			   &page_shift, &ncont, &order);
 
-        if (err < 0)
+	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
-		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
-			     order, access_flags);
+	if (order <= mr_cache_max_order(dev)) {
+		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d", order);
 			mr = NULL;
 		}
-	} else if (access_flags & IB_ACCESS_ON_DEMAND &&
-		   !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
-		err = -EINVAL;
-		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
-		goto error;
+	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+		if (access_flags & IB_ACCESS_ON_DEMAND) {
+			err = -EINVAL;
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			goto error;
+		}
+		use_umr = false;
 	}
 
 	if (!mr) {
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
@@ -1266,8 +1262,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	return &mr->ibmr;
+	if (use_umr) {
+		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+		if (access_flags & IB_ACCESS_ON_DEMAND)
+			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
 
+		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
+					 update_xlt_flags);
+		if (err) {
+			mlx5_ib_dereg_mr(&mr->ibmr);
+			return ERR_PTR(err);
+		}
+	}
+
+	mr->live = 1;
+	return &mr->ibmr;
 error:
 	ib_umem_release(umem);
 	return ERR_PTR(err);
@@ -1355,7 +1365,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
-		if (mr->umred) {
+		if (mr->allocated_from_cache) {
 			err = unreg_umr(dev, mr);
 			if (err)
 				mlx5_ib_warn(dev, "Failed to unregister MR\n");
@@ -1368,12 +1378,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			return err;
 
 		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
-				page_shift, access_flags);
+				page_shift, access_flags, true);
 
 		if (IS_ERR(mr))
 			return PTR_ERR(mr);
 
-		mr->umred = 0;
+		mr->allocated_from_cache = 0;
+		mr->live = 1;
 	} else {
 		/*
 		 * Send a UMR WQE
@@ -1461,7 +1472,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 static int clean_mr(struct mlx5_ib_mr *mr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
-	int umred = mr->umred;
+	int allocated_from_cache = mr->allocated_from_cache;
 	int err;
 
 	if (mr->sig) {
@@ -1479,20 +1490,20 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 
 	mlx5_free_priv_descs(mr);
 
-	if (!umred) {
+	if (!allocated_from_cache) {
+		u32 key = mr->mmkey.key;
+
 		err = destroy_mkey(dev, mr);
+		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-				     mr->mmkey.key, err);
+				     key, err);
 			return err;
 		}
 	} else {
 		mlx5_mr_cache_free(dev, mr);
 	}
 
-	if (!umred)
-		kfree(mr);
-
 	return 0;
 }
 

+ 5 - 0
drivers/infiniband/hw/mlx5/qp.c

@@ -1083,11 +1083,16 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 
 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
 	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+	    MLX5_CAP_ETH(dev->mdev, swp))
+		MLX5_SET(sqc, sqc, allow_swp, 1);
 
 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);

+ 1 - 1
drivers/infiniband/hw/mthca/mthca_cmd.c

@@ -698,7 +698,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
 			if (virt != -1) {
 				pages[nent * 2] = cpu_to_be64(virt);
-				virt += 1 << lg;
+				virt += 1ULL << lg;
 			}
 
 			pages[nent * 2 + 1] =

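Aside (illustration only, not part of this patch): the 1ULL constant matters because a plain 1 << lg is evaluated in 32-bit int arithmetic and can overflow before being widened to the 64-bit virt once lg gets large; keeping the shift in 64-bit arithmetic avoids that:

	/* Illustration only: do the shift in 64-bit space so large lg values
	 * cannot overflow a 32-bit intermediate before the add. */
	static inline u64 next_icm_virt(u64 virt, int lg)
	{
		return virt + (1ULL << lg);
	}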
+ 0 - 5
drivers/infiniband/hw/nes/nes_verbs.c

@@ -481,21 +481,16 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
 	props->lid = 1;
-	props->lmc = 0;
-	props->sm_lid = 0;
-	props->sm_sl = 0;
 	if (netif_queue_stopped(netdev))
 		props->state = IB_PORT_DOWN;
 	else if (nesvnic->linkup)
 		props->state = IB_PORT_ACTIVE;
 	else
 		props->state = IB_PORT_DOWN;
-	props->phys_state = 0;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 			IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
-	props->qkey_viol_cntr = 0;
 	props->active_width = IB_WIDTH_4X;
 	props->active_speed = IB_SPEED_SDR;
 	props->max_msg_sz = 0x80000000;

+ 2 - 10
drivers/infiniband/hw/usnic/usnic_fwd.c

@@ -110,20 +110,12 @@ void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
 	spin_unlock(&ufdev->lock);
 }
 
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
 {
-	int status;
-
 	spin_lock(&ufdev->lock);
-	if (ufdev->inaddr == 0) {
+	if (!ufdev->inaddr)
 		ufdev->inaddr = inaddr;
-		status = 0;
-	} else {
-		status = -EFAULT;
-	}
 	spin_unlock(&ufdev->lock);
-
-	return status;
 }
 
 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)

+ 1 - 1
drivers/infiniband/hw/usnic/usnic_fwd.h

@@ -75,7 +75,7 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
 void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
 
 void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);

+ 6 - 4
drivers/infiniband/hw/usnic/usnic_ib_main.c

@@ -351,7 +351,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 {
 	struct usnic_ib_dev *us_ibdev;
 	union ib_gid gid;
-	struct in_ifaddr *in;
+	struct in_device *ind;
 	struct net_device *netdev;
 
 	usnic_dbg("\n");
@@ -441,9 +441,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 	if (netif_carrier_ok(us_ibdev->netdev))
 		usnic_fwd_carrier_up(us_ibdev->ufdev);
 
-	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
-	if (in != NULL)
-		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+	ind = in_dev_get(netdev);
+	if (ind->ifa_list)
+		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
+				     ind->ifa_list->ifa_address);
+	in_dev_put(ind);
 
 	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
 				us_ibdev->ufdev->inaddr, &gid.raw[0]);

+ 1 - 1
drivers/infiniband/sw/rxe/rxe_hw_counters.c

@@ -33,7 +33,7 @@
 #include "rxe.h"
 #include "rxe_hw_counters.h"
 
-const char * const rxe_counter_name[] = {
+static const char * const rxe_counter_name[] = {
 	[RXE_CNT_SENT_PKTS]           =  "sent_pkts",
 	[RXE_CNT_RCVD_PKTS]           =  "rcvd_pkts",
 	[RXE_CNT_DUP_REQ]             =  "duplicate_request",

+ 1 - 0
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -337,6 +337,7 @@ struct ipoib_dev_priv {
 
 	struct rw_semaphore vlan_rwsem;
 	struct mutex mcast_mutex;
+	struct mutex sysfs_mutex;
 
 	struct rb_root  path_tree;
 	struct list_head path_list;

+ 7 - 1
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -1506,9 +1506,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
 		return -EPERM;
 
-	if (!rtnl_trylock())
+	if (!mutex_trylock(&priv->sysfs_mutex))
 		return restart_syscall();
 
+	if (!rtnl_trylock()) {
+		mutex_unlock(&priv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	ret = ipoib_set_mode(dev, buf);
 
 	/* The assumption is that the function ipoib_set_mode returned
@@ -1517,6 +1522,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 	 */
 	if (ret != -EBUSY)
 		rtnl_unlock();
+	mutex_unlock(&priv->sysfs_mutex);
 
 	return (!ret || ret == -EBUSY) ? count : ret;
 }

+ 6 - 9
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -1893,6 +1893,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
 	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->sysfs_mutex);
 
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -2242,13 +2243,7 @@ static struct net_device *ipoib_add_port(const char *format,
 
 	INIT_IB_EVENT_HANDLER(&priv->event_handler,
 			      priv->ca, ipoib_event);
-	result = ib_register_event_handler(&priv->event_handler);
-	if (result < 0) {
-		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
-		       "port %d (ret = %d)\n",
-		       hca->name, port, result);
-		goto event_failed;
-	}
+	ib_register_event_handler(&priv->event_handler);
 
 
 	result = register_netdev(priv->dev);
 	result = register_netdev(priv->dev);
 	if (result) {
 	if (result) {
@@ -2281,8 +2276,6 @@ register_failed:
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
 	cancel_delayed_work(&priv->neigh_reap_task);
 	flush_workqueue(priv->wq);
 	flush_workqueue(priv->wq);
-
-event_failed:
 	ipoib_dev_cleanup(priv->dev);
 	ipoib_dev_cleanup(priv->dev);
 
 
 device_init_failed:
 device_init_failed:
@@ -2352,7 +2345,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		cancel_delayed_work(&priv->neigh_reap_task);
 		cancel_delayed_work(&priv->neigh_reap_task);
 		flush_workqueue(priv->wq);
 		flush_workqueue(priv->wq);
 
 
+		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
+		mutex_lock(&priv->sysfs_mutex);
 		unregister_netdev(priv->dev);
 		unregister_netdev(priv->dev);
+		mutex_unlock(&priv->sysfs_mutex);
+
 		rn->free_rdma_netdev(priv->dev);
 		rn->free_rdma_netdev(priv->dev);
 
 
 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)

+ 18 - 4
drivers/infiniband/ulp/ipoib/ipoib_vlan.c

@@ -133,12 +133,20 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);

-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
 		return restart_syscall();

+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-	if (!priv)
+	if (!priv) {
+		rtnl_unlock();
+		mutex_unlock(&ppriv->sysfs_mutex);
 		return -ENOMEM;
+	}

 	down_write(&ppriv->vlan_rwsem);

@@ -164,8 +172,8 @@

 out:
 	up_write(&ppriv->vlan_rwsem);
-
 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);

 	if (result) {
 		free_netdev(priv->dev);
@@ -188,8 +196,13 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
 		return -EPERM;

-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
+		return restart_syscall();
+
+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
 		return restart_syscall();
+	}

 	down_write(&ppriv->vlan_rwsem);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
@@ -208,6 +221,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	}

 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);

 	if (dev) {
 		free_netdev(dev);
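The three IPoIB hunks above share one locking pattern: the new sysfs_mutex is always taken before the RTNL lock, and both are taken with trylock so a contended sysfs handler backs off via restart_syscall() instead of deadlocking against the removal path, which now holds sysfs_mutex across unregister_netdev(). A minimal sketch of that ordering (do_change() is an illustrative placeholder, not a function from this patch):

	/* sysfs handler: take sysfs_mutex first, then RTNL, both trylocked. */
	if (!mutex_trylock(&priv->sysfs_mutex))
		return restart_syscall();

	if (!rtnl_trylock()) {
		mutex_unlock(&priv->sysfs_mutex);
		return restart_syscall();
	}

	ret = do_change(priv, buf);

	rtnl_unlock();
	mutex_unlock(&priv->sysfs_mutex);
	return ret;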

+ 2 - 4
drivers/infiniband/ulp/iser/iser_verbs.c

@@ -106,9 +106,7 @@ static int iser_create_device_ib_res(struct iser_device *device)

 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
 			      iser_event_handler);
-	if (ib_register_event_handler(&device->event_handler))
-		goto cq_err;
-
+	ib_register_event_handler(&device->event_handler);
 	return 0;

 cq_err:
@@ -141,7 +139,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
 		comp->cq = NULL;
 	}

-	(void)ib_unregister_event_handler(&device->event_handler);
+	ib_unregister_event_handler(&device->event_handler);
 	ib_dealloc_pd(device->pd);

 	kfree(device->comps);

+ 1 - 6
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c

@@ -954,12 +954,7 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)

 		INIT_IB_EVENT_HANDLER(&port->event_handler,
 				      cport->ibdev, opa_vnic_event);
-		ret = ib_register_event_handler(&port->event_handler);
-		if (ret) {
-			c_err("port %d: event handler register failed\n", i);
-			vema_unregister(cport);
-			return ret;
-		}
+		ib_register_event_handler(&port->event_handler);

 		idr_init(&port->vport_idr);
 		mutex_init(&port->lock);

+ 2 - 3
drivers/infiniband/ulp/srpt/ib_srpt.c

@@ -2238,7 +2238,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
 				cqe, first_wr);
 		cqe = NULL;
 	}
-	
+
 	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
 	if (ret) {
 		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
@@ -2530,8 +2530,7 @@ static void srpt_add_one(struct ib_device *device)

 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
 			      srpt_event_handler);
-	if (ib_register_event_handler(&sdev->event_handler))
-		goto err_cm;
+	ib_register_event_handler(&sdev->event_handler);

 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,

+ 1 - 1
include/linux/mlx5/driver.h

@@ -1093,7 +1093,7 @@ enum {
 };

 enum {
-	MAX_UMR_CACHE_ENTRY = 20,
+	MR_CACHE_LAST_STD_ENTRY = 20,
 	MLX5_IMR_MTT_CACHE_ENTRY,
 	MLX5_IMR_KSM_CACHE_ENTRY,
 	MAX_MR_CACHE_ENTRIES
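The renamed constant above keeps the value 20 but now reads as the index of the last standard MR cache bucket; the IMR MTT/KSM buckets follow it and MAX_MR_CACHE_ENTRIES closes the enum. Code that walks only the standard buckets would therefore iterate inclusively, roughly as below (warm() and cache_entry() are illustrative placeholders):

	/* Standard buckets are 0 .. MR_CACHE_LAST_STD_ENTRY inclusive. */
	int i;

	for (i = 0; i <= MR_CACHE_LAST_STD_ENTRY; i++)
		warm(cache_entry(dev, i));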

+ 8 - 4
include/linux/mlx5/mlx5_ifc.h

@@ -295,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 	u8         inner_tcp_dport[0x1];
 	u8         inner_tcp_flags[0x1];
 	u8         reserved_at_37[0x9];
+	u8         reserved_at_40[0x1a];
+	u8         bth_dst_qp[0x1];

-	u8         reserved_at_40[0x40];
+	u8         reserved_at_5b[0x25];
 };

 struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -432,7 +434,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 	u8         reserved_at_100[0xc];
 	u8         inner_ipv6_flow_label[0x14];

-	u8         reserved_at_120[0xe0];
+	u8         reserved_at_120[0x28];
+	u8         bth_dst_qp[0x18];
+	u8         reserved_at_160[0xa0];
 };

 struct mlx5_ifc_cmd_pas_bits {
@@ -600,7 +604,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
 	u8         scatter_fcs[0x1];
-	u8         reserved_at_1a[0x1];
+	u8         enhanced_multi_pkt_send_wqe[0x1];
 	u8         tunnel_lso_const_out_ip_id[0x1];
 	u8         reserved_at_1c[0x2];
 	u8         tunnel_statless_gre[0x1];
@@ -2441,7 +2445,7 @@ struct mlx5_ifc_sqc_bits {
 	u8         cd_master[0x1];
 	u8         fre[0x1];
 	u8         flush_in_error_en[0x1];
-	u8         reserved_at_4[0x1];
+	u8         allow_multi_pkt_send_wqe[0x1];
 	u8	   min_wqe_inline_mode[0x3];
 	u8         state[0x4];
 	u8         reg_umr[0x1];
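The mlx5_ifc.h hunks above only define the new bits; they are read through the existing MLX5_GET/MLX5_CAP_* accessors. A sketch of how the new Ethernet-offload bit could be reported to user space, assuming the standard MLX5_CAP_ETH() helper (the surrounding resp handling is illustrative):

	/* Advertise enhanced multi-packet send WQE support if firmware exposes it. */
	if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
		resp.mlx5_ib_support_multi_pkt_send_wqes |= MLX5_IB_SUPPORT_EMPW;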

+ 5 - 5
include/rdma/ib_verbs.h

@@ -170,7 +170,7 @@ enum ib_device_cap_flags {
 	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
 	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
 	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
-	IB_DEVICE_INIT_TYPE			= (1 << 9),
+	/* Not in use, former INIT_TYPE		= (1 << 9),*/
 	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
 	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
 	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
@@ -185,7 +185,7 @@ enum ib_device_cap_flags {
 	 * which will always contain a usable lkey.
 	 */
 	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
-	IB_DEVICE_RESERVED /* old SEND_W_INV */	= (1 << 16),
+	/* Reserved, old SEND_W_INV		= (1 << 16),*/
 	IB_DEVICE_MEM_WINDOW			= (1 << 17),
 	/*
 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -220,7 +220,7 @@ enum ib_device_cap_flags {
 	 * of I/O operations with single completion queue managed
 	 * by hardware.
 	 */
-	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
+	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
 	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
@@ -2413,8 +2413,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
 		       enum rdma_link_layer ll);

-int ib_register_event_handler  (struct ib_event_handler *event_handler);
-int ib_unregister_event_handler(struct ib_event_handler *event_handler);
+void ib_register_event_handler(struct ib_event_handler *event_handler);
+void ib_unregister_event_handler(struct ib_event_handler *event_handler);
 void ib_dispatch_event(struct ib_event *event);

 int ib_query_port(struct ib_device *device,
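With the prototypes above switched to void, event-handler registration can no longer fail, which is why every call site earlier in this diff drops its error handling. A caller now reduces to the following sketch (my_event_handler is an illustrative name):

	struct ib_event_handler handler;

	INIT_IB_EVENT_HANDLER(&handler, ibdev, my_event_handler);
	ib_register_event_handler(&handler);	/* returns void: nothing to check */
	/* ... later, on teardown ... */
	ib_unregister_event_handler(&handler);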

+ 2 - 4
include/uapi/rdma/mlx4-abi.h

@@ -98,8 +98,7 @@ struct mlx4_ib_create_srq_resp {
 struct mlx4_ib_create_qp_rss {
 	__u64   rx_hash_fields_mask;
 	__u8    rx_hash_function;
-	__u8    rx_key_len;
-	__u8    reserved[6];
+	__u8    reserved[7];
 	__u8    rx_hash_key[40];
 	__u32   comp_mask;
 	__u32   reserved1;
@@ -111,8 +110,8 @@ struct mlx4_ib_create_qp {
 	__u8	log_sq_bb_count;
 	__u8	log_sq_stride;
 	__u8	sq_no_prefetch;
-	__u32	inl_recv_sz;
 	__u8	reserved;
+	__u32	inl_recv_sz;
 };

 struct mlx4_ib_create_wq {
@@ -121,7 +120,6 @@ struct mlx4_ib_create_wq {
 	__u8	log_range_size;
 	__u8	reserved[3];
 	__u32   comp_mask;
-	__u32   reserved1;
 };

 struct mlx4_ib_modify_wq {

+ 23 - 0
include/uapi/rdma/mlx5-abi.h

@@ -168,6 +168,28 @@ struct mlx5_packet_pacing_caps {
 	__u32 reserved;
 };

+enum mlx5_ib_mpw_caps {
+	MPW_RESERVED		= 1 << 0,
+	MLX5_IB_ALLOW_MPW	= 1 << 1,
+	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
+};
+
+enum mlx5_ib_sw_parsing_offloads {
+	MLX5_IB_SW_PARSING = 1 << 0,
+	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
+	MLX5_IB_SW_PARSING_LSO = 1 << 2,
+};
+
+struct mlx5_ib_sw_parsing_caps {
+	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+};
+
 struct mlx5_ib_query_device_resp {
 	__u32	comp_mask;
 	__u32	response_length;
@@ -177,6 +199,7 @@ struct mlx5_ib_query_device_resp {
 	struct	mlx5_packet_pacing_caps packet_pacing_caps;
 	__u32	mlx5_ib_support_multi_pkt_send_wqes;
 	__u32	reserved;
+	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
 };

 struct mlx5_ib_create_cq {
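For the uapi additions above, a user-space provider that issues the extended query_device could interpret the new capabilities roughly as follows; only the field and flag names come from the header, the rest is an illustrative sketch:

	/* query_device_ex() and enable_sw_parsing_csum() are illustrative helpers. */
	struct mlx5_ib_query_device_resp resp = query_device_ex();

	if ((resp.sw_parsing_caps.sw_parsing_offloads & MLX5_IB_SW_PARSING_CSUM) &&
	    (resp.sw_parsing_caps.supported_qpts & (1 << IB_QPT_RAW_PACKET)))
		enable_sw_parsing_csum();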