@@ -528,7 +528,6 @@ fail:
 
 
 static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-				      struct mlx4_en_rx_desc *rx_desc,
 				      struct mlx4_en_rx_alloc *frags,
 				      unsigned int length)
 {
@@ -703,7 +702,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
 	struct mlx4_en_rx_alloc *frags;
-	struct mlx4_en_rx_desc *rx_desc;
 	struct bpf_prog *xdp_prog;
 	int doorbell_pending;
 	struct sk_buff *skb;
@@ -738,7 +736,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		    cq->mcq.cons_index & cq->size)) {
 
 		frags = ring->rx_info + (index << priv->log_rx_info);
-		rx_desc = ring->buf + (index << ring->log_stride);
 
 		/*
 		 * make sure we read the CQE after we read the ownership bit
@@ -767,7 +764,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			/* Get pointer to first fragment since we haven't
 			 * skb yet and cast it to ethhdr struct
 			 */
-			dma = be64_to_cpu(rx_desc->data[0].addr);
+			dma = frags[0].dma + frags[0].page_offset;
 			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
 						DMA_FROM_DEVICE);
 			ethh = (struct ethhdr *)(page_address(frags[0].page) +
@@ -806,7 +803,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			void *orig_data;
 			u32 act;
 
-			dma = be64_to_cpu(rx_desc->data[0].addr);
+			dma = frags[0].dma + frags[0].page_offset;
 			dma_sync_single_for_cpu(priv->ddev, dma,
 						priv->frag_info[0].frag_size,
 						DMA_FROM_DEVICE);
@@ -946,7 +943,7 @@ xdp_drop_no_cnt:
 		}
 
 		/* GRO not possible, complete processing here */
-		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
+		skb = mlx4_en_rx_skb(priv, frags, length);
 		if (unlikely(!skb)) {
 			ring->dropped++;
 			goto next;