@@ -1022,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 					mad_send_wr->send_buf.mad,
 					sge[0].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+		return -ENOMEM;
+
 	mad_send_wr->header_mapping = sge[0].addr;
 
 	sge[1].addr = ib_dma_map_single(mad_agent->device,
 					ib_get_payload(mad_send_wr),
 					sge[1].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
 	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -2590,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 						 sizeof *mad_priv -
 						   sizeof mad_priv->header,
 						 DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+						  sg_list.addr))) {
+			ret = -ENOMEM;
+			break;
+		}
 		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
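
Both hunks apply the standard rule for the kernel's DMA API: a handle returned by ib_dma_map_single() is only usable once ib_dma_mapping_error() has confirmed the mapping succeeded, and any mapping taken before a later failure must be unwound before bailing out. As a minimal standalone sketch of the send-side shape (the helper name and parameters here are illustrative, not part of the patch):

	#include <linux/errno.h>
	#include <rdma/ib_verbs.h>

	/*
	 * Illustrative only: map a header and a payload for DMA to the
	 * device, checking each mapping and unwinding the first one if
	 * the second fails, mirroring the two-sge error handling added
	 * to ib_send_mad() above.
	 */
	static int map_send_buffers(struct ib_device *dev,
				    void *hdr, size_t hdr_len,
				    void *payload, size_t payload_len,
				    u64 *hdr_dma, u64 *payload_dma)
	{
		*hdr_dma = ib_dma_map_single(dev, hdr, hdr_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, *hdr_dma)))
			return -ENOMEM;

		*payload_dma = ib_dma_map_single(dev, payload, payload_len,
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, *payload_dma))) {
			/* Unwind the header mapping so nothing leaks. */
			ib_dma_unmap_single(dev, *hdr_dma, hdr_len,
					    DMA_TO_DEVICE);
			return -ENOMEM;
		}

		return 0;
	}

The receive-side hunk follows the same rule inside a posting loop: on a mapping error it sets ret to -ENOMEM and breaks out of the loop rather than returning directly, so control reaches the function's existing exit path.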