|
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
|
|
|
{
|
|
|
struct gnttab_map_grant_ref *gop_map = *gopp_map;
|
|
|
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
|
|
+ /* This always points to the shinfo of the skb being checked, which
|
|
|
+ * could be either the first or the one on the frag_list
|
|
|
+ */
|
|
|
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
|
|
+ /* If this is non-NULL, we are currently checking the frag_list skb, and
|
|
|
+ * this points to the shinfo of the first one
|
|
|
+ */
|
|
|
+ struct skb_shared_info *first_shinfo = NULL;
|
|
|
int nr_frags = shinfo->nr_frags;
|
|
|
+ const bool sharedslot = nr_frags &&
|
|
|
+ frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
|
|
|
int i, err;
|
|
|
- struct sk_buff *first_skb = NULL;
|
|
|
|
|
|
/* Check status of header. */
|
|
|
err = (*gopp_copy)->status;
|
|
|
- (*gopp_copy)++;
|
|
|
if (unlikely(err)) {
|
|
|
if (net_ratelimit())
|
|
|
netdev_dbg(queue->vif->dev,
|
|
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
|
|
|
(*gopp_copy)->status,
|
|
|
pending_idx,
|
|
|
(*gopp_copy)->source.u.ref);
|
|
|
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
|
|
|
+ /* The first frag might still have this slot mapped */
|
|
|
+ if (!sharedslot)
|
|
|
+ xenvif_idx_release(queue, pending_idx,
|
|
|
+ XEN_NETIF_RSP_ERROR);
|
|
|
}
|
|
|
+ (*gopp_copy)++;
|
|
|
|
|
|
check_frags:
|
|
|
for (i = 0; i < nr_frags; i++, gop_map++) {
|
|
@@ -1062,8 +1073,19 @@ check_frags:
|
|
|
pending_idx,
|
|
|
gop_map->handle);
|
|
|
/* Had a previous error? Invalidate this fragment. */
|
|
|
- if (unlikely(err))
|
|
|
+ if (unlikely(err)) {
|
|
|
xenvif_idx_unmap(queue, pending_idx);
|
|
|
+ /* If the mapping of the first frag was OK, but
|
|
|
+ * the header's copy failed, and they are
|
|
|
+ * sharing a slot, send an error
|
|
|
+ */
|
|
|
+ if (i == 0 && sharedslot)
|
|
|
+ xenvif_idx_release(queue, pending_idx,
|
|
|
+ XEN_NETIF_RSP_ERROR);
|
|
|
+ else
|
|
|
+ xenvif_idx_release(queue, pending_idx,
|
|
|
+ XEN_NETIF_RSP_OKAY);
|
|
|
+ }
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -1075,42 +1097,53 @@ check_frags:
|
|
|
gop_map->status,
|
|
|
pending_idx,
|
|
|
gop_map->ref);
|
|
|
+
|
|
|
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
|
|
|
|
|
|
/* Not the first error? Preceding frags already invalidated. */
|
|
|
if (err)
|
|
|
continue;
|
|
|
- /* First error: invalidate preceding fragments. */
|
|
|
+
|
|
|
+ /* First error: if the header hasn't shared a slot with the
|
|
|
+ * first frag, release it as well.
|
|
|
+ */
|
|
|
+ if (!sharedslot)
|
|
|
+ xenvif_idx_release(queue,
|
|
|
+ XENVIF_TX_CB(skb)->pending_idx,
|
|
|
+ XEN_NETIF_RSP_OKAY);
|
|
|
+
|
|
|
+ /* Invalidate preceding fragments of this skb. */
|
|
|
for (j = 0; j < i; j++) {
|
|
|
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
|
|
|
xenvif_idx_unmap(queue, pending_idx);
|
|
|
+ xenvif_idx_release(queue, pending_idx,
|
|
|
+ XEN_NETIF_RSP_OKAY);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* And if we found the error while checking the frag_list, unmap
|
|
|
+ * the first skb's frags
|
|
|
+ */
|
|
|
+ if (first_shinfo) {
|
|
|
+ for (j = 0; j < first_shinfo->nr_frags; j++) {
|
|
|
+ pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
|
|
|
+ xenvif_idx_unmap(queue, pending_idx);
|
|
|
+ xenvif_idx_release(queue, pending_idx,
|
|
|
+ XEN_NETIF_RSP_OKAY);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/* Remember the error: invalidate all subsequent fragments. */
|
|
|
err = newerr;
|
|
|
}
|
|
|
|
|
|
- if (skb_has_frag_list(skb)) {
|
|
|
- first_skb = skb;
|
|
|
- skb = shinfo->frag_list;
|
|
|
- shinfo = skb_shinfo(skb);
|
|
|
+ if (skb_has_frag_list(skb) && !first_shinfo) {
|
|
|
+ first_shinfo = skb_shinfo(skb);
|
|
|
+ shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
|
|
|
nr_frags = shinfo->nr_frags;
|
|
|
|
|
|
goto check_frags;
|
|
|
}
|
|
|
|
|
|
- /* There was a mapping error in the frag_list skb. We have to unmap
|
|
|
- * the first skb's frags
|
|
|
- */
|
|
|
- if (first_skb && err) {
|
|
|
- int j;
|
|
|
- shinfo = skb_shinfo(first_skb);
|
|
|
- for (j = 0; j < shinfo->nr_frags; j++) {
|
|
|
- pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
|
|
|
- xenvif_idx_unmap(queue, pending_idx);
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
*gopp_map = gop_map;
|
|
|
return err;
|
|
|
}
|
|
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
|
|
|
|
|
|
/* Check the remap error code. */
|
|
|
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
|
|
|
+ /* If there was an error, xenvif_tx_check_gop is
|
|
|
+ * expected to release all the frags which were mapped,
|
|
|
+ * so kfree_skb shouldn't do it again
|
|
|
+ */
|
|
|
skb_shinfo(skb)->nr_frags = 0;
|
|
|
+ if (skb_has_frag_list(skb)) {
|
|
|
+ struct sk_buff *nskb =
|
|
|
+ skb_shinfo(skb)->frag_list;
|
|
|
+ skb_shinfo(nskb)->nr_frags = 0;
|
|
|
+ }
|
|
|
kfree_skb(skb);
|
|
|
continue;
|
|
|
}
|
|
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
|
|
|
tx_unmap_op.status);
|
|
|
BUG();
|
|
|
}
|
|
|
-
|
|
|
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
|
|
|
}
|
|
|
|
|
|
static inline int rx_work_todo(struct xenvif_queue *queue)
|