@@ -33,6 +33,9 @@
  * SOFTWARE.
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -706,7 +709,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
 	     IB_SMI_DISCARD) {
 		ret = -EINVAL;
-		printk(KERN_ERR PFX "Invalid directed route\n");
+		dev_err(&device->dev, "Invalid directed route\n");
 		goto out;
 	}
 
@@ -718,7 +721,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
@@ -726,7 +729,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for local response MAD\n");
+		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}
@@ -837,9 +840,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
-			       "alloc failed for len %zd, gfp %#x\n",
-			       sizeof (*seg) + seg_size, gfp_mask);
+			dev_err(&send_buf->mad_agent->device->dev,
+				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
+				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
@@ -1199,7 +1202,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 		      struct ib_wc *wc)
 {
-	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
+	dev_err(&mad_agent->device->dev,
+		"ib_process_mad_wc() not implemented yet\n");
 	return 0;
 }
 EXPORT_SYMBOL(ib_process_mad_wc);
@@ -1211,7 +1215,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 
 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
 		if ((*method)->agent[i]) {
-			printk(KERN_ERR PFX "Method %d already in use\n", i);
+			pr_err("Method %d already in use\n", i);
 			return -EINVAL;
 		}
 	}
@@ -1223,8 +1227,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
 	if (!*method) {
-		printk(KERN_ERR PFX "No memory for "
-		       "ib_mad_mgmt_method_table\n");
+		pr_err("No memory for ib_mad_mgmt_method_table\n");
 		return -ENOMEM;
 	}
 
@@ -1319,8 +1322,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
@@ -1386,8 +1389,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
 		if (!vendor) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class_table\n");
 			goto error1;
 		}
 
@@ -1397,8 +1400,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
 		if (!vendor_class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class\n");
 			goto error2;
 		}
 
@@ -1429,7 +1432,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			goto check_in_use;
 		}
 	}
-	printk(KERN_ERR PFX "All OUI slots in use\n");
+	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
 	goto error3;
 
 check_in_use:
@@ -1640,9 +1643,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 		if (mad_agent->agent.recv_handler)
 			atomic_inc(&mad_agent->refcount);
 		else {
-			printk(KERN_NOTICE PFX "No receive handler for client "
-			       "%p on port %d\n",
-			       &mad_agent->agent, port_priv->port_num);
+			dev_notice(&port_priv->device->dev,
+				   "No receive handler for client %p on port %d\n",
+				   &mad_agent->agent, port_priv->port_num);
 			mad_agent = NULL;
 		}
 	}
@@ -1658,8 +1661,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
 
 	/* Make sure MAD base version is understood */
 	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
-		printk(KERN_ERR PFX "MAD received with unsupported base "
-		       "version %d\n", mad->mad_hdr.base_version);
+		pr_err("MAD received with unsupported base version %d\n",
+		       mad->mad_hdr.base_version);
 		goto out;
 	}
 
@@ -1911,8 +1914,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response) {
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
+		dev_err(&port_priv->device->dev,
+			"ib_mad_recv_done_handler no memory for response buffer\n");
 		goto out;
 	}
 
@@ -2176,7 +2179,8 @@ retry:
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
 				   &bad_send_wr);
 		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
+			dev_err(&port_priv->device->dev,
+				"ib_post_send failed: %d\n", ret);
 			mad_send_wr = queued_send_wr;
 			wc->status = IB_WC_LOC_QP_OP_ERR;
 			goto retry;
@@ -2248,8 +2252,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 					   IB_QP_STATE | IB_QP_CUR_STATE);
 			kfree(attr);
 			if (ret)
-				printk(KERN_ERR PFX "mad_error_handler - "
-				       "ib_modify_qp to RTS : %d\n", ret);
+				dev_err(&port_priv->device->dev,
+					"mad_error_handler - ib_modify_qp to RTS : %d\n",
+					ret);
 			else
 				mark_sends_for_retry(qp_info);
 		}
@@ -2408,7 +2413,8 @@ static void local_completions(struct work_struct *work)
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
-				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				dev_err(&mad_agent_priv->agent.device->dev,
+					"No receive MAD agent for local completion\n");
 				free_mad = 1;
 				goto local_send_completion;
 			}
@@ -2589,7 +2595,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		} else {
 			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 			if (!mad_priv) {
-				printk(KERN_ERR PFX "No memory for receive buffer\n");
+				dev_err(&qp_info->port_priv->device->dev,
+					"No memory for receive buffer\n");
 				ret = -ENOMEM;
 				break;
 			}
@@ -2625,7 +2632,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 					      sizeof mad_priv->header,
 					    DMA_FROM_DEVICE);
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
+			dev_err(&qp_info->port_priv->device->dev,
+				"ib_post_recv failed: %d\n", ret);
 			break;
 		}
 	} while (post);
@@ -2681,7 +2689,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
-		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
+		dev_err(&port_priv->device->dev,
+			"Couldn't kmalloc ib_qp_attr\n");
 		return -ENOMEM;
 	}
 
@@ -2705,16 +2714,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
 					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "INIT: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to INIT: %d\n",
+				i, ret);
 			goto out;
 		}
 
 		attr->qp_state = IB_QPS_RTR;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTR: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTR: %d\n",
+				i, ret);
 			goto out;
 		}
 
@@ -2722,16 +2733,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		attr->sq_psn = IB_MAD_SEND_Q_PSN;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTS: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTS: %d\n",
+				i, ret);
 			goto out;
 		}
 	}
 
 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 	if (ret) {
-		printk(KERN_ERR PFX "Failed to request completion "
-		       "notification: %d\n", ret);
+		dev_err(&port_priv->device->dev,
+			"Failed to request completion notification: %d\n",
+			ret);
 		goto out;
 	}
 
@@ -2741,7 +2754,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
+			dev_err(&port_priv->device->dev,
+				"Couldn't post receive WRs\n");
 			goto out;
 		}
 	}
@@ -2755,7 +2769,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
 	struct ib_mad_qp_info *qp_info = qp_context;
 
 	/* It's worse than that! He's dead, Jim! */
-	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
+	dev_err(&qp_info->port_priv->device->dev,
+		"Fatal error (%d) on MAD QP (%d)\n",
 	       event->event, qp_info->qp->qp_num);
 }
 
@@ -2801,8 +2816,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
 	qp_init_attr.event_handler = qp_event_handler;
 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
 	if (IS_ERR(qp_info->qp)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
-		       get_spl_qp_index(qp_type));
+		dev_err(&qp_info->port_priv->device->dev,
+			"Couldn't create ib_mad QP%d\n",
+			get_spl_qp_index(qp_type));
 		ret = PTR_ERR(qp_info->qp);
 		goto error;
 	}
@@ -2840,7 +2856,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
 		return -ENOMEM;
 	}
 
@@ -2860,21 +2876,21 @@ static int ib_mad_port_open(struct ib_device *device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
 	if (IS_ERR(port_priv->cq)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
+		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
 		goto error3;
 	}
 
 	port_priv->pd = ib_alloc_pd(device);
 	if (IS_ERR(port_priv->pd)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}
 
 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
+		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
 		ret = PTR_ERR(port_priv->mr);
 		goto error5;
 	}
@@ -2902,7 +2918,7 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	ret = ib_mad_port_start(port_priv);
 	if (ret) {
-		printk(KERN_ERR PFX "Couldn't start port\n");
+		dev_err(&device->dev, "Couldn't start port\n");
 		goto error9;
 	}
 
@@ -2946,7 +2962,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_mad_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
-		printk(KERN_ERR PFX "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %d not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del_init(&port_priv->port_list);
@@ -2984,14 +3000,12 @@ static void ib_mad_init_device(struct ib_device *device)
 
 	for (i = start; i <= end; i++) {
 		if (ib_mad_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;
 		}
 		if (ib_agent_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't open port %d for agents\n", i);
 			goto error_agent;
 		}
 	}
@@ -2999,20 +3013,17 @@ static void ib_mad_init_device(struct ib_device *device)
 
 error_agent:
 	if (ib_mad_port_close(device, i))
-		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-		       device->name, i);
+		dev_err(&device->dev, "Couldn't close port %d\n", i);
 
 error:
 	i--;
 
 	while (i >= start) {
 		if (ib_agent_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n", i);
 		if (ib_mad_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't close port %d\n", i);
 		i--;
 	}
 }
@@ -3033,12 +3044,12 @@ static void ib_mad_remove_device(struct ib_device *device)
 	}
 	for (i = 0; i < num_ports; i++, cur_port++) {
 		if (ib_agent_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, cur_port);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n",
+				cur_port);
 		if (ib_mad_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, cur_port);
+			dev_err(&device->dev, "Couldn't close port %d\n",
+				cur_port);
 	}
 }
 
@@ -3064,7 +3075,7 @@ static int __init ib_mad_init_module(void)
 					 SLAB_HWCACHE_ALIGN,
 					 NULL);
 	if (!ib_mad_cache) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+		pr_err("Couldn't create ib_mad cache\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
@@ -3072,7 +3083,7 @@ static int __init ib_mad_init_module(void)
 	INIT_LIST_HEAD(&ib_mad_port_list);
 
 	if (ib_register_client(&mad_client)) {
-		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
+		pr_err("Couldn't register ib_mad client\n");
 		ret = -EINVAL;
 		goto error2;
 	}