@@ -2814,11 +2814,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
 			sizeof(long),
 			GFP_KERNEL);
-	if (!ibdev->ib_uc_qpns_bitmap) {
-		dev_err(&dev->persist->pdev->dev,
-			"bit map alloc failed\n");
+	if (!ibdev->ib_uc_qpns_bitmap)
 		goto err_steer_qp_release;
-	}
 
 	bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
 
@@ -3055,15 +3052,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
 
 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
-	if (!dm) {
-		pr_err("failed to allocate memory for tunneling qp update\n");
+	if (!dm)
 		return;
-	}
 
 	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
-			pr_err("failed to allocate memory for tunneling qp update work struct\n");
 			while (--i >= 0)
 				kfree(dm[i]);
 			goto out;
@@ -3223,8 +3217,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
 		ew->port = port;
 		ew->ib_dev = ibdev;
 		queue_work(wq, &ew->work);
-	} else {
-		pr_err("failed to allocate memory for sl2vl update work\n");
 	}
 }
 
@@ -3284,10 +3276,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 
 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
-		if (!ew) {
-			pr_err("failed to allocate memory for events work\n");
+		if (!ew)
 			break;
-		}
 
 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);