@@ -168,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
 	if (skb_is_gso(skb))
 		needed++;
+	if (skb->sw_hash)
+		needed++;
 
 	do {
 		prod = queue->rx.sring->req_prod;
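An skb that carries a s/w hash now needs one more slot in the RX ring for the hash extra segment, on top of its data slots and any GSO extra. A worked example of the arithmetic, as a sketch only: the packet size, the function name and the XEN_PAGE_SIZE value below are illustrative assumptions, not part of the patch.

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* Worst-case slot count for a hypothetical 65535-byte GSO skb
	 * that also carries a s/w hash, assuming XEN_PAGE_SIZE == 4096.
	 */
	static int example_slots_needed(void)
	{
		int needed = DIV_ROUND_UP(65535, 4096);	/* 16 data slots */

		needed++;	/* one more for the GSO extra segment */
		needed++;	/* one more for the hash extra segment */

		return needed;	/* 18 */
	}
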
@@ -285,6 +287,8 @@ struct gop_frag_copy {
 	struct xenvif_rx_meta *meta;
 	int head;
 	int gso_type;
+	int protocol;
+	int hash_present;
 
 	struct page *page;
 };
@@ -331,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
 	npo->copy_off += *len;
 	info->meta->size += *len;
 
+	if (!info->head)
+		return;
+
 	/* Leave a gap for the GSO descriptor. */
-	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+	if ((1 << info->gso_type) & queue->vif->gso_mask)
+		queue->rx.req_cons++;
+
+	/* Leave a gap for the hash extra segment. */
+	if (info->hash_present)
 		queue->rx.req_cons++;
 
 	info->head = 0; /* There must be something in this buffer now */
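Splitting the check into an early return plus two independent tests makes the gap accounting clearer: queue->rx.req_cons is advanced once for each extra segment that will sit between the first data slot and the rest of the packet. If that reading is right, a packet that is both GSO and hashed ends up laid out as sketched below (slot numbers are illustrative, not taken from the patch).

	/* Illustrative RX ring layout for an skb that is both GSO and
	 * hashed, assuming the reserved gaps are filled in this order:
	 *
	 *   slot N	xen_netif_rx_response	first data slot,
	 *					XEN_NETRXF_extra_info set
	 *   slot N+1	xen_netif_extra_info	GSO segment,
	 *					XEN_NETIF_EXTRA_FLAG_MORE set
	 *   slot N+2	xen_netif_extra_info	hash segment
	 *   slot N+3	xen_netif_rx_response	remaining data slots
	 */
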
@@ -367,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		.npo = npo,
 		.head = *head,
 		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+		/* xenvif_set_skb_hash() will have either set a s/w
+		 * hash or cleared the hash depending on
+		 * whether the frontend wants a hash for this skb.
+		 */
+		.hash_present = skb->sw_hash,
 	};
 	unsigned long bytes;
 
@@ -555,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct xenvif *vif = queue->vif;
 	s8 status;
 	u16 flags;
 	struct xen_netif_rx_response *resp;
@@ -590,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
+		struct xen_netif_extra_info *extra = NULL;
 
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_prefix_mask) {
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&queue->rx,
 						 queue->rx.rsp_prod_pvt++);
 
@@ -610,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 		queue->stats.tx_bytes += skb->len;
 		queue->stats.tx_packets++;
 
-		status = xenvif_check_gop(queue->vif,
+		status = xenvif_check_gop(vif,
 					  XENVIF_RX_CB(skb)->meta_slots_used,
 					  &npo);
 
@@ -632,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 					flags);
 
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_mask) {
-			struct xen_netif_extra_info *gso =
-				(struct xen_netif_extra_info *)
+		    vif->gso_mask) {
+			extra = (struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&queue->rx,
 						  queue->rx.rsp_prod_pvt++);
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
-			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-			gso->u.gso.pad = 0;
-			gso->u.gso.features = 0;
+			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+			extra->u.gso.pad = 0;
+			extra->u.gso.features = 0;
+
+			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+			extra->flags = 0;
+		}
+
+		if (skb->sw_hash) {
+			/* Since the skb got here via xenvif_select_queue()
+			 * we know that the hash has been re-calculated
+			 * according to a configuration set by the frontend
+			 * and therefore we know that it is legitimate to
+			 * pass it to the frontend.
+			 */
+			if (resp->flags & XEN_NETRXF_extra_info)
+				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+			else
+				resp->flags |= XEN_NETRXF_extra_info;
+
+			extra = (struct xen_netif_extra_info *)
+				RING_GET_RESPONSE(&queue->rx,
+						  queue->rx.rsp_prod_pvt++);
 
-			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-			gso->flags = 0;
+			extra->u.hash.algorithm =
+				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+			if (skb->l4_hash)
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+			else
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+			*(uint32_t *)extra->u.hash.value =
+				skb_get_hash_raw(skb);
+
+			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+			extra->flags = 0;
 		}
 
 		xenvif_add_frag_responses(queue, status,
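For completeness, this is roughly what consumption of the new extra segment might look like on the frontend side. This is a sketch under stated assumptions, not code from this series: frontend_set_skb_hash() is a made-up name, and the mapping of the hash types onto the kernel's PKT_HASH_TYPE_L3/L4 categories simply mirrors the backend logic above.

	#include <linux/skbuff.h>
	#include <xen/interface/io/netif.h>

	/* Hypothetical frontend-side handling of a hash extra segment. */
	static void frontend_set_skb_hash(struct sk_buff *skb,
					  const struct xen_netif_extra_info *extra)
	{
		u32 hash = *(const u32 *)extra->u.hash.value;

		/* The backend above only ever advertises Toeplitz. */
		if (extra->u.hash.algorithm !=
		    XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ)
			return;

		switch (extra->u.hash.type) {
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
			/* 4-tuple hash: usable for L4 steering. */
			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			break;
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
			/* 2-tuple hash: address pair only. */
			skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
			break;
		}
	}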