@@ -163,7 +163,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 
 static inline bool mlx5e_page_is_reserved(struct page *page)
 {
-	return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+	return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
 }
 
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
@@ -177,8 +177,10 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 		return false;
 	}
 
-	if (unlikely(page_is_pfmemalloc(dma_info->page)))
+	if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+		rq->stats.cache_waive++;
 		return false;
+	}
 
 	cache->page_cache[cache->tail] = *dma_info;
 	cache->tail = tail_next;
@@ -252,7 +254,7 @@ static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
 	       !mlx5e_page_is_reserved(wi->di.page);
 }
 
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
 	struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
 
@@ -263,8 +265,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 		wi->offset = 0;
 	}
 
-	wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset +
-				     rq->rx_headroom);
+	wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom);
 	return 0;
 }
 
@@ -296,7 +297,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 
 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
 {
-	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+	return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
 }
 
 static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
@@ -305,7 +306,7 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
					     u32 page_idx, u32 frag_offset,
					     u32 len)
 {
-	unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+	unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));
 
 	dma_sync_single_for_cpu(rq->pdev,
				wi->umr.dma_info[page_idx].addr + frag_offset,
@@ -358,7 +359,6 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
 		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-		sq->db.ico_wqe[pi].num_wqebbs = 1;
 		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 	}
 
@@ -369,41 +369,35 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
			     MLX5_OPCODE_UMR);
 
 	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
-	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
 	sq->pc += num_wqebbs;
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 }
 
 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-				    struct mlx5e_rx_wqe *wqe,
				     u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
 	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 	int err;
 	int i;
 
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		err = mlx5e_page_alloc_mapped(rq, dma_info);
 		if (unlikely(err))
 			goto err_unmap;
 		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
 		page_ref_add(dma_info->page, pg_strides);
-		wi->skbs_frags[i] = 0;
 	}
 
+	memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
 	wi->consumed_strides = 0;
-	wqe->data.addr = cpu_to_be64(dma_offset);
 
 	return 0;
 
 err_unmap:
 	while (--i >= 0) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+		dma_info--;
 		page_ref_sub(dma_info->page, pg_strides);
 		mlx5e_page_release(rq, dma_info, true);
 	}
@@ -414,27 +408,21 @@ err_unmap:
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 {
 	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 	int i;
 
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
 		mlx5e_page_release(rq, dma_info, true);
 	}
 }
 
-void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_ll *wq = &rq->wq;
 	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
-	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
-
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
-		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
-		return;
-	}
+	rq->mpwqe.umr_in_progress = false;
 
 	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
 
@@ -444,16 +432,18 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
 	mlx5_wq_ll_update_db_record(wq);
 }
 
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
 	int err;
 
-	err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
-	if (unlikely(err))
+	err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
+	if (unlikely(err)) {
+		rq->stats.buff_alloc_err++;
 		return err;
-	set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+	}
+	rq->mpwqe.umr_in_progress = true;
 	mlx5e_post_umr_wqe(rq, ix);
-	return -EBUSY;
+	return 0;
 }
 
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
@@ -463,94 +453,150 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_free_rx_mpwqe(rq, wi);
 }
 
-#define RQ_CANNOT_POST(rq) \
-	(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
-	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
-
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_ll *wq = &rq->wq;
+	int err;
 
-	if (unlikely(RQ_CANNOT_POST(rq)))
+	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
 		return false;
 
-	while (!mlx5_wq_ll_is_full(wq)) {
+	if (mlx5_wq_ll_is_full(wq))
+		return false;
+
+	do {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-		int err;
 
-		err = rq->alloc_wqe(rq, wqe, wq->head);
-		if (err == -EBUSY)
-			return true;
+		err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
 		if (unlikely(err)) {
 			rq->stats.buff_alloc_err++;
 			break;
 		}
 
 		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-	}
+	} while (!mlx5_wq_ll_is_full(wq));
 
 	/* ensure wqes are visible to device before updating doorbell record */
 	dma_wmb();
 
 	mlx5_wq_ll_update_db_record(wq);
 
-	return !mlx5_wq_ll_is_full(wq);
+	return !!err;
+}
+
+static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
+					     struct mlx5e_icosq *sq,
+					     struct mlx5e_rq *rq,
+					     struct mlx5_cqe64 *cqe)
+{
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+	struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
+
+	mlx5_cqwq_pop(&cq->wq);
+
+	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+		WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+			  cqe->op_own);
+		return;
+	}
+
+	if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
+		mlx5e_post_rx_mpwqe(rq);
+		return;
+	}
+
+	if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
+		WARN_ONCE(true,
+			  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+			  icowi->opcode);
+}
+
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
+{
+	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
+	struct mlx5_cqe64 *cqe;
+
+	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+		return;
+
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (likely(!cqe))
+		return;
+
+	/* by design, there's only a single cqe */
+	mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+}
+
+bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+
+	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+		return false;
+
+	mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
+
+	if (mlx5_wq_ll_is_full(wq))
+		return false;
+
+	if (!rq->mpwqe.umr_in_progress)
+		mlx5e_alloc_rx_mpwqe(rq, wq->head);
+
+	return true;
 }
 
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
				  u32 cqe_bcnt)
 {
 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
-	struct iphdr *ipv4;
-	struct ipv6hdr *ipv6;
 	struct tcphdr *tcp;
 	int network_depth = 0;
 	__be16 proto;
 	u16 tot_len;
+	void *ip_p;
 
 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	int tcp_ack = ((l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
-		       (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA));
+	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
 
 	skb->mac_len = ETH_HLEN;
 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 
-	ipv4 = (struct iphdr *)(skb->data + network_depth);
-	ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
 	tot_len = cqe_bcnt - network_depth;
+	ip_p = skb->data + network_depth;
 
 	if (proto == htons(ETH_P_IP)) {
-		tcp = (struct tcphdr *)(skb->data + network_depth +
-					sizeof(struct iphdr));
-		ipv6 = NULL;
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-	} else {
-		tcp = (struct tcphdr *)(skb->data + network_depth +
-					sizeof(struct ipv6hdr));
-		ipv4 = NULL;
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-	}
-
-	if (get_cqe_lro_tcppsh(cqe))
-		tcp->psh = 1;
+		struct iphdr *ipv4 = ip_p;
 
-	if (tcp_ack) {
-		tcp->ack = 1;
-		tcp->ack_seq = cqe->lro_ack_seq_num;
-		tcp->window = cqe->lro_tcp_win;
-	}
+		tcp = ip_p + sizeof(struct iphdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
-	if (ipv4) {
 		ipv4->ttl = cqe->lro_min_ttl;
 		ipv4->tot_len = cpu_to_be16(tot_len);
 		ipv4->check = 0;
 		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					    ipv4->ihl);
 	} else {
+		struct ipv6hdr *ipv6 = ip_p;
+
+		tcp = ip_p + sizeof(struct ipv6hdr);
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
 		ipv6->hop_limit = cqe->lro_min_ttl;
 		ipv6->payload_len = cpu_to_be16(tot_len -
						 sizeof(struct ipv6hdr));
 	}
+
+	tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+	if (tcp_ack) {
+		tcp->ack = 1;
+		tcp->ack_seq = cqe->lro_ack_seq_num;
+		tcp->window = cqe->lro_tcp_win;
+	}
 }
 
 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
@@ -776,9 +822,9 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			      struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
 	struct mlx5e_dma_info *di = &wi->di;
+	u16 rx_headroom = rq->buff.headroom;
 	struct sk_buff *skb;
 	void *va, *data;
-	u16 rx_headroom = rq->rx_headroom;
 	bool consumed;
 	u32 frag_size;
 
@@ -911,7 +957,7 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
					    struct sk_buff *skb)
 {
 	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
-	u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
+	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
 	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
 	u32 page_idx = wqe_offset >> PAGE_SHIFT;
 	u32 head_page_idx = page_idx;
@@ -979,7 +1025,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	napi_gro_receive(rq->cq.napi, skb);
 
 mpwrq_cqe_out:
-	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
 		return;
 
 	mlx5e_free_rx_mpwqe(rq, wi);
@@ -989,21 +1035,23 @@ mpwrq_cqe_out:
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+	struct mlx5e_xdpsq *xdpsq;
+	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
 		return 0;
 
 	if (cq->decmprs_left)
 		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
-	for (; work_done < budget; work_done++) {
-		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		return 0;
 
-		if (!cqe)
-			break;
+	xdpsq = &rq->xdpsq;
 
+	do {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
 			work_done +=
				mlx5e_decompress_cqes_start(rq, cq,
@@ -1014,7 +1062,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		mlx5_cqwq_pop(&cq->wq);
 
 		rq->handle_rx_cqe(rq, cqe);
-	}
+	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	if (xdpsq->db.doorbell) {
 		mlx5e_xmit_xdp_doorbell(xdpsq);
@@ -1032,13 +1080,18 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_xdpsq *sq;
+	struct mlx5_cqe64 *cqe;
 	struct mlx5e_rq *rq;
 	u16 sqcc;
 	int i;
 
 	sq = container_of(cq, struct mlx5e_xdpsq, cq);
 
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+		return false;
+
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
 		return false;
 
 	rq = container_of(sq, struct mlx5e_rq, xdpsq);
@@ -1048,15 +1101,11 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
	  */
 	sqcc = sq->cc;
 
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
+	i = 0;
+	do {
 		u16 wqe_counter;
 		bool last_wqe;
 
-		cqe = mlx5_cqwq_get_cqe(&cq->wq);
-		if (!cqe)
-			break;
-
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
@@ -1074,7 +1123,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 			/* Recycle RX page */
 			mlx5e_page_release(rq, di, true);
 		} while (!last_wqe);
-	}
+	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	mlx5_cqwq_update_db_record(&cq->wq);
 