@@ -3,6 +3,7 @@
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2009 HNR Consulting. All rights reserved.
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -44,6 +45,7 @@
 #include "mad_priv.h"
 #include "mad_rmpp.h"
 #include "smi.h"
+#include "opa_smi.h"
 #include "agent.h"
 
 MODULE_LICENSE("Dual BSD/GPL");
@@ -751,6 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 {
         int ret = 0;
         struct ib_smp *smp = mad_send_wr->send_buf.mad;
+        struct opa_smp *opa_smp = (struct opa_smp *)smp;
         unsigned long flags;
         struct ib_mad_local_private *local;
         struct ib_mad_private *mad_priv;
@@ -762,6 +765,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
         size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
         u16 out_mad_pkey_index = 0;
+        u16 drslid;
+        bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
+                                    mad_agent_priv->qp_info->port_priv->port_num);
 
         if (device->node_type == RDMA_NODE_IB_SWITCH &&
             smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
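
The opa flag caches rdma_cap_opa_mad() for the port, so the directed-route
send path below branches once per MAD between OPA and IB handling. A minimal
sketch of the dispatch test the next hunk applies (the helper name is_opa_smp
is hypothetical, not part of the patch):

        /* Hypothetical predicate: take the OPA path only when the port is
         * OPA-capable and the SMP itself carries the OPA class version. */
        static inline bool is_opa_smp(bool opa, const struct ib_smp *smp)
        {
                return opa && smp->class_version == OPA_SMP_CLASS_VERSION;
        }
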
@@ -775,19 +781,48 @@
          * If we are at the start of the LID routed part, don't update the
          * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
          */
-        if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
-             IB_LID_PERMISSIVE &&
-             smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
-             IB_SMI_DISCARD) {
-                ret = -EINVAL;
-                dev_err(&device->dev, "Invalid directed route\n");
-                goto out;
-        }
+        if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+                u32 opa_drslid;
+
+                if ((opa_get_smp_direction(opa_smp)
+                     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
+                     OPA_LID_PERMISSIVE &&
+                     opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+                                                port_num) == IB_SMI_DISCARD) {
+                        ret = -EINVAL;
+                        dev_err(&device->dev, "OPA Invalid directed route\n");
+                        goto out;
+                }
+                opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
+                if (opa_drslid != OPA_LID_PERMISSIVE &&
+                    opa_drslid & 0xffff0000) {
+                        ret = -EINVAL;
+                        dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
+                                opa_drslid);
+                        goto out;
+                }
+                drslid = (u16)(opa_drslid & 0x0000ffff);
 
-        /* Check to post send on QP or process locally */
-        if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
-            smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
-                goto out;
+                /* Check to post send on QP or process locally */
+                if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
+                    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
+                        goto out;
+        } else {
+                if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
+                    IB_LID_PERMISSIVE &&
+                    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+                    IB_SMI_DISCARD) {
+                        ret = -EINVAL;
+                        dev_err(&device->dev, "Invalid directed route\n");
+                        goto out;
+                }
+                drslid = be16_to_cpu(smp->dr_slid);
 
+                /* Check to post send on QP or process locally */
+                if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
+                    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
+                        goto out;
+        }
 
         local = kmalloc(sizeof *local, GFP_ATOMIC);
         if (!local) {
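
OPA directed-route SLIDs are 32 bits wide, while the synthesized work
completion (like the IB path) carries only a 16-bit LID, so the hunk above
rejects any non-permissive dr_slid with the upper 16 bits set before
narrowing. The same check, isolated as a hypothetical helper:

        /* Hypothetical helper mirroring the narrowing above: a non-permissive
         * 32-bit OPA DR SLID must fit in 16 bits to be usable here. */
        static int opa_drslid_to_u16(u32 opa_drslid, u16 *drslid)
        {
                if (opa_drslid != OPA_LID_PERMISSIVE &&
                    (opa_drslid & 0xffff0000))
                        return -EINVAL;
                *drslid = (u16)(opa_drslid & 0x0000ffff);
                return 0;
        }
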
@@ -806,10 +841,16 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         }
 
         build_smp_wc(mad_agent_priv->agent.qp,
-                     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+                     send_wr->wr_id, drslid,
                      send_wr->wr.ud.pkey_index,
                      send_wr->wr.ud.port_num, &mad_wc);
 
+        if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
+                mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
+                                  + mad_send_wr->send_buf.data_len
+                                  + sizeof(struct ib_grh);
+        }
+
         /* No GRH for DR SMP */
         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                   (const struct ib_mad_hdr *)smp, mad_size,
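
Unlike IB MADs, which are a fixed 256 bytes, OPA MADs are variable-length, so
for local processing of an OPA SMP the synthesized completion reports the real
wire size: the send buffer's header plus payload lengths plus the GRH that a
hardware receive would have included.
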
@@ -861,6 +902,10 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         }
 
         local->mad_send_wr = mad_send_wr;
+        if (opa) {
+                local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+                local->return_wc_byte_len = mad_size;
+        }
         /* Reference MAD agent until send side of local completion handled */
         atomic_inc(&mad_agent_priv->refcount);
         /* Queue local completion to local list */
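
return_wc_byte_len carries the response size forward so that
local_completions() (final hunk) can report it as the received MAD length of
an OPA reply instead of assuming a fixed-size struct ib_mad.
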
@@ -1754,14 +1799,18 @@ out:
         return mad_agent;
 }
 
-static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
+static int validate_mad(const struct ib_mad_hdr *mad_hdr,
+                        const struct ib_mad_qp_info *qp_info,
+                        bool opa)
 {
         int valid = 0;
+        u32 qp_num = qp_info->qp->qp_num;
 
         /* Make sure MAD base version is understood */
-        if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
-                pr_err("MAD received with unsupported base version %d\n",
-                       mad_hdr->base_version);
+        if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
+            (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
+                pr_err("MAD received with unsupported base version %d %s\n",
+                       mad_hdr->base_version, opa ? "(opa)" : "");
                 goto out;
         }
 
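
The widened check accepts OPA MADs only on OPA-capable ports. By De Morgan,
the new condition is equivalent to this hypothetical predicate (not part of
the patch):

        /* Accept IB MADs always; accept OPA MADs only when the port is
         * OPA-capable. */
        static bool base_version_ok(u8 base_version, bool opa)
        {
                return base_version == IB_MGMT_BASE_VERSION ||
                       (opa && base_version == OPA_MGMT_BASE_VERSION);
        }
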
@@ -2011,7 +2060,8 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
                                     port_priv->device,
                                     smi_get_fwd_port(smp),
                                     qp_info->qp->qp_num,
-                                    response->mad_size);
+                                    response->mad_size,
+                                    false);
 
                 return IB_SMI_DISCARD;
         }
@@ -2019,7 +2069,8 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 }
 
 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
-                                    struct ib_mad_private *response)
+                                    struct ib_mad_private *response,
+                                    size_t *resp_len, bool opa)
 {
         const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
         struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
@@ -2035,11 +2086,96 @@ static bool generate_unmatched_resp(const struct ib_mad_private *recv,
                 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                         resp_hdr->status |= IB_SMP_DIRECTION;
 
+                if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
+                        if (recv_hdr->mgmt_class ==
+                            IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+                            recv_hdr->mgmt_class ==
+                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                                *resp_len = opa_get_smp_header_size(
+                                                (struct opa_smp *)recv->mad);
+                        else
+                                *resp_len = sizeof(struct ib_mad_hdr);
+                }
+
                 return true;
         } else {
                 return false;
         }
 }
+
+static enum smi_action
+handle_opa_smi(struct ib_mad_port_private *port_priv,
+               struct ib_mad_qp_info *qp_info,
+               struct ib_wc *wc,
+               int port_num,
+               struct ib_mad_private *recv,
+               struct ib_mad_private *response)
+{
+        enum smi_forward_action retsmi;
+        struct opa_smp *smp = (struct opa_smp *)recv->mad;
+
+        if (opa_smi_handle_dr_smp_recv(smp,
+                                       port_priv->device->node_type,
+                                       port_num,
+                                       port_priv->device->phys_port_cnt) ==
+                                       IB_SMI_DISCARD)
+                return IB_SMI_DISCARD;
+
+        retsmi = opa_smi_check_forward_dr_smp(smp);
+        if (retsmi == IB_SMI_LOCAL)
+                return IB_SMI_HANDLE;
+
+        if (retsmi == IB_SMI_SEND) { /* don't forward */
+                if (opa_smi_handle_dr_smp_send(smp,
+                                               port_priv->device->node_type,
+                                               port_num) == IB_SMI_DISCARD)
+                        return IB_SMI_DISCARD;
+
+                if (opa_smi_check_local_smp(smp, port_priv->device) ==
+                    IB_SMI_DISCARD)
+                        return IB_SMI_DISCARD;
+
+        } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+                /* forward case for switches */
+                memcpy(response, recv, mad_priv_size(response));
+                response->header.recv_wc.wc = &response->header.wc;
+                response->header.recv_wc.recv_buf.opa_mad =
+                                (struct opa_mad *)response->mad;
+                response->header.recv_wc.recv_buf.grh = &response->grh;
+
+                agent_send_response((const struct ib_mad_hdr *)response->mad,
+                                    &response->grh, wc,
+                                    port_priv->device,
+                                    opa_smi_get_fwd_port(smp),
+                                    qp_info->qp->qp_num,
+                                    recv->header.wc.byte_len,
+                                    true);
+
+                return IB_SMI_DISCARD;
+        }
+
+        return IB_SMI_HANDLE;
+}
+
+static enum smi_action
+handle_smi(struct ib_mad_port_private *port_priv,
+           struct ib_mad_qp_info *qp_info,
+           struct ib_wc *wc,
+           int port_num,
+           struct ib_mad_private *recv,
+           struct ib_mad_private *response,
+           bool opa)
+{
+        struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
+
+        if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
+            mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+                return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
+                                      response);
+
+        return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
+}
+
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                      struct ib_wc *wc)
 {
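
handle_smi() keeps the receive path common: only a MAD that arrives on an
OPA-capable port and carries both the OPA base version and the OPA SM class
version is diverted to handle_opa_smi(); everything else falls through to the
existing handle_ib_smi(). The switch-forwarding branch mirrors the IB version
but reports the actual received length (recv->header.wc.byte_len) and passes
true as the new opa argument of agent_send_response().
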
@@ -2052,11 +2188,15 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         int ret = IB_MAD_RESULT_SUCCESS;
         size_t mad_size;
         u16 resp_mad_pkey_index = 0;
+        bool opa;
 
         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
         qp_info = mad_list->mad_queue->qp_info;
         dequeue_mad(mad_list);
 
+        opa = rdma_cap_opa_mad(qp_info->port_priv->device,
+                               qp_info->port_priv->port_num);
+
         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                     mad_list);
         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
@@ -2068,7 +2208,15 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         /* Setup MAD receive work completion from "normal" work completion */
         recv->header.wc = *wc;
         recv->header.recv_wc.wc = &recv->header.wc;
-        recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+
+        if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
+                recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
+                recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
+        } else {
+                recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+                recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
+        }
+
         recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
         recv->header.recv_wc.recv_buf.grh = &recv->grh;
 
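
For an OPA MAD the received length comes from the wire (wc->byte_len minus the
40-byte GRH) because OPA MADs are variable-sized up to the 2048-byte struct
opa_mad, whereas IB MADs remain a fixed 256 bytes; mad_seg_size tells the RMPP
code which segment buffer size to assume.
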
@@ -2076,7 +2224,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
 
         /* Validate MAD */
-        if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
+        if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
                 goto out;
 
         mad_size = recv->mad_size;
@@ -2094,8 +2242,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
         if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
-                                  response)
+                if (handle_smi(port_priv, qp_info, wc, port_num, recv,
+                               response, opa)
                     == IB_SMI_DISCARD)
                         goto out;
         }
@@ -2109,6 +2257,10 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                             recv->mad_size,
                                             (struct ib_mad_hdr *)response->mad,
                                             &mad_size, &resp_mad_pkey_index);
+
+                if (opa)
+                        wc->pkey_index = resp_mad_pkey_index;
+
                 if (ret & IB_MAD_RESULT_SUCCESS) {
                         if (ret & IB_MAD_RESULT_CONSUMED)
                                 goto out;
@@ -2118,7 +2270,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                                     port_priv->device,
                                                     port_num,
                                                     qp_info->qp->qp_num,
-                                                    response->mad_size);
+                                                    mad_size, opa);
                                 goto out;
                         }
                 }
@@ -2133,10 +2285,10 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                  */
                 recv = NULL;
         } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
-                   generate_unmatched_resp(recv, response)) {
+                   generate_unmatched_resp(recv, response, &mad_size, opa)) {
                 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
                                     port_priv->device, port_num,
-                                    qp_info->qp->qp_num, response->mad_size);
+                                    qp_info->qp->qp_num, mad_size, opa);
         }
 
 out:
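
Both response paths now hand agent_send_response() the mad_size that
process_mad() or generate_unmatched_resp() may have shrunk, plus the opa flag,
so an OPA reply goes out at its actual length rather than at the full
receive-buffer size.
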
@@ -2537,10 +2689,14 @@ static void local_completions(struct work_struct *work)
         int free_mad;
         struct ib_wc wc;
         struct ib_mad_send_wc mad_send_wc;
+        bool opa;
 
         mad_agent_priv =
                 container_of(work, struct ib_mad_agent_private, local_work);
 
+        opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
+                               mad_agent_priv->qp_info->port_priv->port_num);
+
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
         while (!list_empty(&mad_agent_priv->local_list)) {
                 local = list_entry(mad_agent_priv->local_list.next,
@@ -2550,6 +2706,7 @@ static void local_completions(struct work_struct *work)
                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                 free_mad = 0;
                 if (local->mad_priv) {
+                        u8 base_version;
                         recv_mad_agent = local->recv_mad_agent;
                         if (!recv_mad_agent) {
                                 dev_err(&mad_agent_priv->agent.device->dev,
@@ -2565,11 +2722,20 @@ static void local_completions(struct work_struct *work)
                         build_smp_wc(recv_mad_agent->agent.qp,
                                      (unsigned long) local->mad_send_wr,
                                      be16_to_cpu(IB_LID_PERMISSIVE),
-                                     0, recv_mad_agent->agent.port_num, &wc);
+                                     local->mad_send_wr->send_wr.wr.ud.pkey_index,
+                                     recv_mad_agent->agent.port_num, &wc);
 
                         local->mad_priv->header.recv_wc.wc = &wc;
-                        local->mad_priv->header.recv_wc.mad_len =
-                                                sizeof(struct ib_mad);
+
+                        base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
+                        if (opa && base_version == OPA_MGMT_BASE_VERSION) {
+                                local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
+                                local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
+                        } else {
+                                local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+                                local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
+                        }
+
                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
                                  &local->mad_priv->header.recv_wc.rmpp_list);
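
In local_completions() the P_Key index chosen at send time is propagated into
the synthesized work completion (it was previously hard-coded to 0), and an
OPA local reply reports the return_wc_byte_len captured in
handle_outgoing_dr_smp() as its MAD length; IB replies keep the fixed
sizeof(struct ib_mad).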
|