@@ -182,7 +182,7 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 		goto partial_error;
 
 	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 	mapping[1] =
-		ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+		ib_dma_map_page(priv->ca, page,
 				0, PAGE_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
 		goto partial_error;
|
@@ -323,7 +323,8 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		mapping[i + off] = ib_dma_map_page(ca, frag->page,
+		mapping[i + off] = ib_dma_map_page(ca,
+						   skb_frag_page(frag),
 						   frag->page_offset, frag->size,
 						   DMA_TO_DEVICE);
 		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))