@@ -70,33 +70,33 @@ unsigned int rx_drain_timeout_jiffies;
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);

-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st);

-static inline int tx_work_todo(struct xenvif *vif);
-static inline int rx_work_todo(struct xenvif *vif);
+static inline int tx_work_todo(struct xenvif_queue *queue);
+static inline int rx_work_todo(struct xenvif_queue *queue);

-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags);

-static inline unsigned long idx_to_pfn(struct xenvif *vif,
+static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
- return page_to_pfn(vif->mmap_pages[idx]);
+ return page_to_pfn(queue->mmap_pages[idx]);
}

-static inline unsigned long idx_to_kaddr(struct xenvif *vif,
+static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
u16 idx)
{
- return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
@@ -104,13 +104,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,

/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
container_of(ubuf, struct pending_tx_info, callback_struct);
return container_of(temp - pending_idx,
- struct xenvif,
+ struct xenvif_queue,
pending_tx_info[0]);
}

@@ -136,24 +136,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}

-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
RING_IDX prod, cons;

do {
- prod = vif->rx.sring->req_prod;
- cons = vif->rx.req_cons;
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;

if (prod - cons >= needed)
return true;

- vif->rx.sring->req_event = prod + 1;
+ queue->rx.sring->req_event = prod + 1;

/* Make sure event is visible before we check prod
* again.
*/
mb();
- } while (vif->rx.sring->req_prod != prod);
+ } while (queue->rx.sring->req_prod != prod);

return false;
}
@@ -207,13 +207,13 @@ struct netrx_pending_operations {
grant_ref_t copy_gref;
};

-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;

- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -231,11 +231,11 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head,
- struct xenvif *foreign_vif,
+ struct xenvif_queue *foreign_queue,
grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
@@ -268,7 +268,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
*/
BUG_ON(*head);

- meta = get_next_rx_buffer(vif, npo);
+ meta = get_next_rx_buffer(queue, npo);
}

if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +278,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;

- if (foreign_vif) {
- copy_gop->source.domid = foreign_vif->domid;
+ if (foreign_queue) {
+ copy_gop->source.domid = foreign_queue->vif->domid;
copy_gop->source.u.ref = foreign_gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
@@ -289,7 +289,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
copy_gop->source.offset = offset;

- copy_gop->dest.domid = vif->domid;
+ copy_gop->dest.domid = queue->vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;

@@ -314,8 +314,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}

- if (*head && ((1 << gso_type) & vif->gso_mask))
- vif->rx.req_cons++;
+ if (*head && ((1 << gso_type) & queue->vif->gso_mask))
+ queue->rx.req_cons++;

*head = 0; /* There must be something in this buffer now. */

@@ -337,13 +337,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
const int i,
const struct ubuf_info *ubuf)
{
- struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);

do {
u16 pending_idx = ubuf->desc;

if (skb_shinfo(skb)->frags[i].page.p ==
- foreign_vif->mmap_pages[pending_idx])
+ foreign_queue->mmap_pages[pending_idx])
break;
ubuf = (struct ubuf_info *) ubuf->ctx;
} while (ubuf);
@@ -364,7 +364,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
* frontend-side LRO).
*/
static int xenvif_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+ struct netrx_pending_operations *npo,
+ struct xenvif_queue *queue)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +391,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,

/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +399,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
meta->id = req->id;
}

- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;

if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +423,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;

- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
virt_to_page(data), len, offset, &head,
NULL,
0);
@@ -433,7 +434,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* This variable also signals whether foreign_gref has a real
* value or not.
*/
- struct xenvif *foreign_vif = NULL;
+ struct xenvif_queue *foreign_queue = NULL;
grant_ref_t foreign_gref;

if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +459,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (likely(ubuf)) {
u16 pending_idx = ubuf->desc;

- foreign_vif = ubuf_to_vif(ubuf);
- foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ foreign_queue = ubuf_to_queue(ubuf);
+ foreign_gref =
+ foreign_queue->pending_tx_info[pending_idx].req.gref;
/* Just a safety measure. If this was the last
* element on the list, the for loop will
* iterate again if a local page were added to
@@ -477,13 +479,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
*/
ubuf = head_ubuf;
}
- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
- foreign_vif,
- foreign_vif ? foreign_gref : UINT_MAX);
+ foreign_queue,
+ foreign_queue ? foreign_gref : UINT_MAX);
}

return npo->meta_prod - old_meta_prod;
@@ -515,7 +517,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}

-static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
struct xenvif_rx_meta *meta,
int nr_meta_slots)
{
@@ -536,7 +538,7 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
flags = XEN_NETRXF_more_data;

offset = 0;
- make_rx_response(vif, meta[i].id, status, offset,
+ make_rx_response(queue, meta[i].id, status, offset,
meta[i].size, flags);
}
}
@@ -547,12 +549,12 @@ struct xenvif_rx_cb {

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

-void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif_queue *queue)
{
- wake_up(&vif->wq);
+ wake_up(&queue->wq);
}

-static void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
s8 status;
u16 flags;
@@ -565,13 +567,13 @@ static void xenvif_rx_action(struct xenvif *vif)
bool need_to_notify = false;

struct netrx_pending_operations npo = {
- .copy = vif->grant_copy_op,
- .meta = vif->meta,
+ .copy = queue->grant_copy_op,
+ .meta = queue->meta,
};

skb_queue_head_init(&rxq);

- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
@@ -614,42 +616,42 @@ static void xenvif_rx_action(struct xenvif *vif)
max_slots_needed++;

/* If the skb may not fit then bail out now */
- if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
- skb_queue_head(&vif->rx_queue, skb);
+ if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
+ skb_queue_head(&queue->rx_queue, skb);
need_to_notify = true;
- vif->rx_last_skb_slots = max_slots_needed;
+ queue->rx_last_skb_slots = max_slots_needed;
break;
} else
- vif->rx_last_skb_slots = 0;
+ queue->rx_last_skb_slots = 0;

- old_req_cons = vif->rx.req_cons;
- XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
- ring_slots_used = vif->rx.req_cons - old_req_cons;
+ old_req_cons = queue->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
+ ring_slots_used = queue->rx.req_cons - old_req_cons;

BUG_ON(ring_slots_used > max_slots_needed);

__skb_queue_tail(&rxq, skb);
}

- BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

if (!npo.copy_prod)
goto done;

BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
- gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
+ gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

while ((skb = __skb_dequeue(&rxq)) != NULL) {

- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_prefix_mask) {
- resp = RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_prefix_mask) {
+ resp = RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);

resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

- resp->offset = vif->meta[npo.meta_cons].gso_size;
- resp->id = vif->meta[npo.meta_cons].id;
+ resp->offset = queue->meta[npo.meta_cons].gso_size;
+ resp->id = queue->meta[npo.meta_cons].id;
resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

npo.meta_cons++;
@@ -657,10 +659,10 @@ static void xenvif_rx_action(struct xenvif *vif)
}


- vif->dev->stats.tx_bytes += skb->len;
- vif->dev->stats.tx_packets++;
+ queue->stats.tx_bytes += skb->len;
+ queue->stats.tx_packets++;

- status = xenvif_check_gop(vif,
+ status = xenvif_check_gop(queue->vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);

@@ -676,22 +678,22 @@ static void xenvif_rx_action(struct xenvif *vif)
flags |= XEN_NETRXF_data_validated;

offset = 0;
- resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
+ resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
status, offset,
- vif->meta[npo.meta_cons].size,
+ queue->meta[npo.meta_cons].size,
flags);

- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_mask) {
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);

resp->flags |= XEN_NETRXF_extra_info;

- gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;

@@ -699,11 +701,11 @@ static void xenvif_rx_action(struct xenvif *vif)
gso->flags = 0;
}

- xenvif_add_frag_responses(vif, status,
- vif->meta + npo.meta_cons + 1,
+ xenvif_add_frag_responses(queue, status,
+ queue->meta + npo.meta_cons + 1,
XENVIF_RX_CB(skb)->meta_slots_used);

- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

need_to_notify |= !!ret;

@@ -713,20 +715,20 @@ static void xenvif_rx_action(struct xenvif *vif)

done:
if (need_to_notify)
- notify_remote_via_irq(vif->rx_irq);
+ notify_remote_via_irq(queue->rx_irq);
}

-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
int more_to_do;

- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+ RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

if (more_to_do)
- napi_schedule(&vif->napi);
+ napi_schedule(&queue->napi);
}

-static void tx_add_credit(struct xenvif *vif)
+static void tx_add_credit(struct xenvif_queue *queue)
{
unsigned long max_burst, max_credit;

@@ -734,55 +736,57 @@ static void tx_add_credit(struct xenvif *vif)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+ max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, vif->credit_bytes);
+ max_burst = max(max_burst, queue->credit_bytes);

/* Take care that adding a new chunk of credit doesn't wrap to zero. */
- max_credit = vif->remaining_credit + vif->credit_bytes;
- if (max_credit < vif->remaining_credit)
+ max_credit = queue->remaining_credit + queue->credit_bytes;
+ if (max_credit < queue->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

- vif->remaining_credit = min(max_credit, max_burst);
+ queue->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
- struct xenvif *vif = (struct xenvif *)data;
- tx_add_credit(vif);
- xenvif_napi_schedule_or_enable_events(vif);
+ struct xenvif_queue *queue = (struct xenvif_queue *)data;
+ tx_add_credit(queue);
+ xenvif_napi_schedule_or_enable_events(queue);
}

-static void xenvif_tx_err(struct xenvif *vif,
+static void xenvif_tx_err(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp, RING_IDX end)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
unsigned long flags;

do {
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&vif->tx, cons++);
+ txp = RING_GET_REQUEST(&queue->tx, cons++);
} while (1);
- vif->tx.req_cons = cons;
+ queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
- xenvif_kick_thread(vif);
+ /* Disable the vif from queue 0's kthread */
+ if (vif->queues)
+ xenvif_kick_thread(&vif->queues[0]);
}

-static int xenvif_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
int slots = 0;
int drop_err = 0;
int more_data;
@@ -794,10 +798,10 @@ static int xenvif_count_requests(struct xenvif *vif,
struct xen_netif_tx_request dropped_tx = { 0 };

if (slots >= work_to_do) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -ENODATA;
}

@@ -805,10 +809,10 @@ static int xenvif_count_requests(struct xenvif *vif,
* considered malicious.
*/
if (unlikely(slots >= fatal_skb_slots)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -E2BIG;
}

@@ -821,7 +825,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Too many slots (%d) exceeding limit (%d), dropping packet\n",
slots, XEN_NETBK_LEGACY_SLOTS_MAX);
drop_err = -E2BIG;
@@ -830,7 +834,7 @@ static int xenvif_count_requests(struct xenvif *vif,
if (drop_err)
txp = &dropped_tx;

- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
sizeof(*txp));

/* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +848,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && txp->size > first->size) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Invalid tx request, slot size %u > remaining size %u\n",
txp->size, first->size);
drop_err = -EIO;
@@ -854,9 +858,9 @@ static int xenvif_count_requests(struct xenvif *vif,
slots++;

if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}

@@ -868,7 +872,7 @@ static int xenvif_count_requests(struct xenvif *vif,
} while (more_data);

if (drop_err) {
- xenvif_tx_err(vif, first, cons + slots);
+ xenvif_tx_err(queue, first, cons + slots);
return drop_err;
}

@@ -882,17 +886,17 @@ struct xenvif_tx_cb {

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

-static inline void xenvif_tx_create_map_op(struct xenvif *vif,
+static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *mop)
{
- vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
- gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
+ queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
+ gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map | GNTMAP_readonly,
- txp->gref, vif->domid);
+ txp->gref, queue->vif->domid);

- memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
}

@@ -913,7 +917,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}

-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *gop)
@@ -940,9 +944,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,

for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
}

@@ -950,7 +954,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
struct sk_buff *nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
return NULL;
}
@@ -960,9 +964,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,

for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
@@ -973,34 +977,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
return gop;
}

-static inline void xenvif_grant_handle_set(struct xenvif *vif,
+static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
u16 pending_idx,
grant_handle_t handle)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to overwrite active handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = handle;
+ queue->grant_tx_handle[pending_idx] = handle;
}

-static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
u16 pending_idx)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to unmap invalid handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
+ queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

-static int xenvif_tx_check_gop(struct xenvif *vif,
+static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct sk_buff *skb,
struct gnttab_map_grant_ref **gopp_map,
struct gnttab_copy **gopp_copy)
@@ -1017,12 +1021,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
(*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
}

check_frags:
@@ -1035,24 +1039,24 @@ check_frags:
newerr = gop_map->status;

if (likely(!newerr)) {
- xenvif_grant_handle_set(vif,
+ xenvif_grant_handle_set(queue,
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
continue;
}

/* Error on this fragment: respond to client with an error. */
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
i,
gop_map->status,
pending_idx,
gop_map->ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1060,7 +1064,7 @@ check_frags:
/* First error: invalidate preceding fragments. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}

/* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1088,7 @@ check_frags:
shinfo = skb_shinfo(first_skb);
for (j = 0; j < shinfo->nr_frags; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}
}

@@ -1092,7 +1096,7 @@ check_frags:
return err;
}

-static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1114,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
/* If this is not the first frag, chain it to the previous*/
if (prev_pending_idx == INVALID_PENDING_IDX)
skb_shinfo(skb)->destructor_arg =
- &callback_param(vif, pending_idx);
+ &callback_param(queue, pending_idx);
else
- callback_param(vif, prev_pending_idx).ctx =
- &callback_param(vif, pending_idx);
+ callback_param(queue, prev_pending_idx).ctx =
+ &callback_param(queue, pending_idx);

- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
prev_pending_idx = pending_idx;

- txp = &vif->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(vif, pending_idx));
+ txp = &queue->pending_tx_info[pending_idx].req;
+ page = virt_to_page(idx_to_kaddr(queue, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;

/* Take an extra reference to offset network stack's put_page */
- get_page(vif->mmap_pages[pending_idx]);
+ get_page(queue->mmap_pages[pending_idx]);
}
/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
* overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1140,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->pfmemalloc = false;
}

-static int xenvif_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif_queue *queue,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;

do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_err(vif->dev, "Missing extra info\n");
- xenvif_fatal_tx_err(vif);
+ netdev_err(queue->vif->dev, "Missing extra info\n");
+ xenvif_fatal_tx_err(queue->vif);
return -EBADR;
}

- memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+ memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
- vif->tx.req_cons = ++cons;
- netdev_err(vif->dev,
+ queue->tx.req_cons = ++cons;
+ netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}

memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
- vif->tx.req_cons = ++cons;
+ queue->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

return work_to_do;
@@ -1197,7 +1201,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}

-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
bool recalculate_partial_csum = false;

@@ -1207,7 +1211,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
+ queue->stats.rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
@@ -1219,31 +1223,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
return skb_checksum_setup(skb, recalculate_partial_csum);
}

-static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
u64 now = get_jiffies_64();
- u64 next_credit = vif->credit_window_start +
- msecs_to_jiffies(vif->credit_usec / 1000);
+ u64 next_credit = queue->credit_window_start +
+ msecs_to_jiffies(queue->credit_usec / 1000);

/* Timer could already be pending in rare cases. */
- if (timer_pending(&vif->credit_timeout))
+ if (timer_pending(&queue->credit_timeout))
return true;

/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
- vif->credit_window_start = now;
- tx_add_credit(vif);
+ queue->credit_window_start = now;
+ tx_add_credit(queue);
}

/* Still too big to send right now? Set a callback. */
- if (size > vif->remaining_credit) {
- vif->credit_timeout.data =
- (unsigned long)vif;
- vif->credit_timeout.function =
+ if (size > queue->remaining_credit) {
+ queue->credit_timeout.data =
+ (unsigned long)queue;
+ queue->credit_timeout.function =
tx_credit_callback;
- mod_timer(&vif->credit_timeout,
+ mod_timer(&queue->credit_timeout,
next_credit);
- vif->credit_window_start = next_credit;
+ queue->credit_window_start = next_credit;

return true;
}
@@ -1251,16 +1255,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}

-static void xenvif_tx_build_gops(struct xenvif *vif,
+static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;

- while (skb_queue_len(&vif->tx_queue) < budget) {
+ while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1274,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
unsigned int data_len;
pending_ring_idx_t index;

- if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ if (queue->tx.sring->req_prod - queue->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
- vif->tx.sring->req_prod, vif->tx.req_cons,
+ queue->tx.sring->req_prod, queue->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}

- work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;

- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
+ memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

/* Credit-based scheduling. */
- if (txreq.size > vif->remaining_credit &&
- tx_credit_exceeded(vif, txreq.size))
+ if (txreq.size > queue->remaining_credit &&
+ tx_credit_exceeded(queue, txreq.size))
break;

- vif->remaining_credit -= txreq.size;
+ queue->remaining_credit -= txreq.size;

work_to_do--;
- vif->tx.req_cons = ++idx;
+ queue->tx.req_cons = ++idx;

memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xenvif_get_extras(vif, extras,
+ work_to_do = xenvif_get_extras(queue, extras,
work_to_do);
- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
break;
}

- ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;

idx += ret;

if (unlikely(txreq.size < ETH_HLEN)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}

/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}

- index = pending_index(vif->pending_cons);
- pending_idx = vif->pending_ring[index];
+ index = pending_index(queue->pending_cons);
+ pending_idx = queue->pending_ring[index];

data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1344,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,

skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}

@@ -1350,7 +1354,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

- if (xenvif_set_skb_gso(vif, skb, gso)) {
+ if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
break;
@@ -1360,18 +1364,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
XENVIF_TX_CB(skb)->pending_idx = pending_idx;

__skb_put(skb, data_len);
- vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
- vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
- vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+ queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+ queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
+ queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

- vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
+ queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
virt_to_mfn(skb->data);
- vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
- vif->tx_copy_ops[*copy_ops].dest.offset =
+ queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+ queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data);

- vif->tx_copy_ops[*copy_ops].len = data_len;
- vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+ queue->tx_copy_ops[*copy_ops].len = data_len;
+ queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

(*copy_ops)++;

@@ -1380,42 +1384,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
- xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
- memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
+ memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
sizeof(txreq));
}

- vif->pending_cons++;
+ queue->pending_cons++;

- request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
+ request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
gop = request_gop;

- __skb_queue_tail(&vif->tx_queue, skb);
+ __skb_queue_tail(&queue->tx_queue, skb);

- vif->tx.req_cons = idx;
+ queue->tx.req_cons = idx;

- if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
- (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
+ if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+ (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}

- (*map_ops) = gop - vif->tx_map_ops;
+ (*map_ops) = gop - queue->tx_map_ops;
return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
* frags. Returns 0 or -ENOMEM if can't allocate new pages.
*/
-static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1427,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

- vif->tx_zerocopy_sent += 2;
- vif->tx_frag_overflow++;
+ queue->stats.tx_zerocopy_sent += 2;
+ queue->stats.tx_frag_overflow++;

- xenvif_fill_frags(vif, nskb);
+ xenvif_fill_frags(queue, nskb);
/* Subtract frags size, we will correct it later */
skb->truesize -= skb->data_len;
skb->len += nskb->len;
@@ -1478,37 +1482,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
return 0;
}

-static int xenvif_tx_submit(struct xenvif *vif)
+static int xenvif_tx_submit(struct xenvif_queue *queue)
{
- struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
- struct gnttab_copy *gop_copy = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
+ struct gnttab_copy *gop_copy = queue->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;

- while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;

pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- txp = &vif->pending_tx_info[pending_idx].req;
+ txp = &queue->pending_tx_info[pending_idx].req;

/* Check the remap error code. */
- if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
+ if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}

data_len = skb->len;
- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xenvif_idx_release(vif, pending_idx,
+ xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}

@@ -1517,12 +1521,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;

- xenvif_fill_frags(vif, skb);
+ xenvif_fill_frags(queue, skb);

if (unlikely(skb_has_frag_list(skb))) {
- if (xenvif_handle_frag_list(vif, skb)) {
+ if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Not enough memory to consolidate frag_list!\n");
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
@@ -1535,12 +1539,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
__pskb_pull_tail(skb, target - skb_headlen(skb));
}

- skb->dev = vif->dev;
+ skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);

- if (checksum_setup(vif, skb)) {
- netdev_dbg(vif->dev,
+ if (checksum_setup(queue, skb)) {
+ netdev_dbg(queue->vif->dev,
"Can't setup checksum in net_tx_action\n");
/* We have to set this flag to trigger the callback */
if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1569,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
DIV_ROUND_UP(skb->len - hdrlen, mss);
}

- vif->dev->stats.rx_bytes += skb->len;
- vif->dev->stats.rx_packets++;
+ queue->stats.rx_bytes += skb->len;
+ queue->stats.rx_packets++;

work_done++;

@@ -1577,7 +1581,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
*/
if (skb_shinfo(skb)->destructor_arg) {
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- vif->tx_zerocopy_sent++;
+ queue->stats.tx_zerocopy_sent++;
}

netif_receive_skb(skb);
@@ -1590,47 +1594,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
- struct xenvif *vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *queue = ubuf_to_queue(ubuf);

/* This is the only place where we grab this lock, to protect callbacks
* from each other.
*/
- spin_lock_irqsave(&vif->callback_lock, flags);
+ spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
ubuf = (struct ubuf_info *) ubuf->ctx;
- BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
- index = pending_index(vif->dealloc_prod);
- vif->dealloc_ring[index] = pending_idx;
+ index = pending_index(queue->dealloc_prod);
+ queue->dealloc_ring[index] = pending_idx;
/* Sync with xenvif_tx_dealloc_action:
* insert idx then incr producer.
*/
smp_wmb();
- vif->dealloc_prod++;
+ queue->dealloc_prod++;
} while (ubuf);
- wake_up(&vif->dealloc_wq);
- spin_unlock_irqrestore(&vif->callback_lock, flags);
+ wake_up(&queue->dealloc_wq);
+ spin_unlock_irqrestore(&queue->callback_lock, flags);

if (likely(zerocopy_success))
- vif->tx_zerocopy_success++;
+ queue->stats.tx_zerocopy_success++;
else
- vif->tx_zerocopy_fail++;
+ queue->stats.tx_zerocopy_fail++;
}

-static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
pending_ring_idx_t dc, dp;
u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
unsigned int i = 0;

- dc = vif->dealloc_cons;
- gop = vif->tx_unmap_ops;
+ dc = queue->dealloc_cons;
+ gop = queue->tx_unmap_ops;

/* Free up any grants we have finished using */
do {
- dp = vif->dealloc_prod;
+ dp = queue->dealloc_prod;

/* Ensure we see all indices enqueued by all
* xenvif_zerocopy_callback().
@@ -1638,38 +1642,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
smp_rmb();

while (dc != dp) {
- BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
pending_idx =
- vif->dealloc_ring[pending_index(dc++)];
+ queue->dealloc_ring[pending_index(dc++)];

- pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx_release[gop-queue->tx_unmap_ops] =
pending_idx;
- vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
- vif->mmap_pages[pending_idx];
+ queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
++gop;
}

- } while (dp != vif->dealloc_prod);
+ } while (dp != queue->dealloc_prod);

- vif->dealloc_cons = dc;
+ queue->dealloc_cons = dc;

- if (gop - vif->tx_unmap_ops > 0) {
+ if (gop - queue->tx_unmap_ops > 0) {
int ret;
- ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ ret = gnttab_unmap_refs(queue->tx_unmap_ops,
NULL,
- vif->pages_to_unmap,
- gop - vif->tx_unmap_ops);
+ queue->pages_to_unmap,
+ gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
- gop - vif->tx_unmap_ops, ret);
- for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - queue->tx_unmap_ops, ret);
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
" host_addr: %llx handle: %x status: %d\n",
gop[i].host_addr,
gop[i].handle,
@@ -1679,91 +1683,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
}
}

- for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
- xenvif_idx_release(vif, pending_idx_release[i],
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
+ xenvif_idx_release(queue, pending_idx_release[i],
XEN_NETIF_RSP_OKAY);
}


/* Called after netfront has transmitted */
-int xenvif_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops, nr_cops = 0;
int work_done, ret;

- if (unlikely(!tx_work_todo(vif)))
+ if (unlikely(!tx_work_todo(queue)))
return 0;

- xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);
+ xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

if (nr_cops == 0)
return 0;

- gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
+ gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
if (nr_mops != 0) {
- ret = gnttab_map_refs(vif->tx_map_ops,
+ ret = gnttab_map_refs(queue->tx_map_ops,
NULL,
- vif->pages_to_map,
+ queue->pages_to_map,
nr_mops);
BUG_ON(ret);
}

- work_done = xenvif_tx_submit(vif);
+ work_done = xenvif_tx_submit(queue);

return work_done;
}

-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
unsigned long flags;

- pending_tx_info = &vif->pending_tx_info[pending_idx];
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, &pending_tx_info->req, status);
- index = pending_index(vif->pending_prod);
- vif->pending_ring[index] = pending_idx;
+ pending_tx_info = &queue->pending_tx_info[pending_idx];
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, &pending_tx_info->req, status);
+ index = pending_index(queue->pending_prod);
+ queue->pending_ring[index] = pending_idx;
/* TX shouldn't use the index before we give it back here */
mb();
- vif->pending_prod++;
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ queue->pending_prod++;
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}


-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st)
{
- RING_IDX i = vif->tx.rsp_prod_pvt;
+ RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
int notify;

- resp = RING_GET_RESPONSE(&vif->tx, i);
+ resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
resp->status = st;

if (txp->flags & XEN_NETTXF_extra_info)
- RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

- vif->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
+ queue->tx.rsp_prod_pvt = ++i;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- notify_remote_via_irq(vif->tx_irq);
+ notify_remote_via_irq(queue->tx_irq);
}

-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
- RING_IDX i = vif->rx.rsp_prod_pvt;
+ RING_IDX i = queue->rx.rsp_prod_pvt;
struct xen_netif_rx_response *resp;

- resp = RING_GET_RESPONSE(&vif->rx, i);
+ resp = RING_GET_RESPONSE(&queue->rx, i);
resp->offset = offset;
resp->flags = flags;
resp->id = id;
@@ -1771,26 +1775,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
if (st < 0)
resp->status = (s16)st;

- vif->rx.rsp_prod_pvt = ++i;
+ queue->rx.rsp_prod_pvt = ++i;

return resp;
}

-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;

gnttab_set_unmap_op(&tx_unmap_op,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);

ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
- &vif->mmap_pages[pending_idx], 1);
+ &queue->mmap_pages[pending_idx], 1);
if (ret) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
ret,
pending_idx,
@@ -1800,41 +1804,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
BUG();
}

- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}

-static inline int rx_work_todo(struct xenvif *vif)
+static inline int rx_work_todo(struct xenvif_queue *queue)
{
- return (!skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
- vif->rx_queue_purge;
+ return (!skb_queue_empty(&queue->rx_queue) &&
+ xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
+ queue->rx_queue_purge;
}

-static inline int tx_work_todo(struct xenvif *vif)
+static inline int tx_work_todo(struct xenvif_queue *queue)
{
-
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
return 1;

return 0;
}

-static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
- return vif->dealloc_cons != vif->dealloc_prod;
+ return queue->dealloc_cons != queue->dealloc_prod;
}

-void xenvif_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
- if (vif->tx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->tx.sring);
- if (vif->rx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->rx.sring);
+ if (queue->tx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->tx.sring);
+ if (queue->rx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->rx.sring);
}

-int xenvif_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
@@ -1844,85 +1847,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,

int err = -ENOMEM;

- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
tx_ring_ref, &addr);
if (err)
goto err;

txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
rx_ring_ref, &addr);
if (err)
goto err;

rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

return 0;

err:
- xenvif_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(queue);
return err;
}

-void xenvif_stop_queue(struct xenvif *vif)
-{
- if (!vif->can_queue)
- return;
-
- netif_stop_queue(vif->dev);
-}
-
-static void xenvif_start_queue(struct xenvif *vif)
+static void xenvif_start_queue(struct xenvif_queue *queue)
{
- if (xenvif_schedulable(vif))
- netif_wake_queue(vif->dev);
+ if (xenvif_schedulable(queue->vif))
+ xenvif_wake_queue(queue);
}

int xenvif_kthread_guest_rx(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;
struct sk_buff *skb;

while (!kthread_should_stop()) {
- wait_event_interruptible(vif->wq,
- rx_work_todo(vif) ||
- vif->disabled ||
+ wait_event_interruptible(queue->wq,
+ rx_work_todo(queue) ||
+ queue->vif->disabled ||
kthread_should_stop());

/* This frontend is found to be rogue, disable it in
* kthread context. Currently this is only set when
* netback finds out frontend sends malformed packet,
* but we cannot disable the interface in softirq
- * context so we defer it here.
+ * context so we defer it here, if this thread is
+ * associated with queue 0.
*/
- if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
- xenvif_carrier_off(vif);
+ if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
+ xenvif_carrier_off(queue->vif);

if (kthread_should_stop())
break;

- if (vif->rx_queue_purge) {
- skb_queue_purge(&vif->rx_queue);
- vif->rx_queue_purge = false;
+ if (queue->rx_queue_purge) {
+ skb_queue_purge(&queue->rx_queue);
+ queue->rx_queue_purge = false;
}

- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_rx_action(vif);
+ if (!skb_queue_empty(&queue->rx_queue))
+ xenvif_rx_action(queue);

- if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev)) {
- del_timer_sync(&vif->wake_queue);
- xenvif_start_queue(vif);
+ if (skb_queue_empty(&queue->rx_queue) &&
+ xenvif_queue_stopped(queue)) {
+ del_timer_sync(&queue->wake_queue);
+ xenvif_start_queue(queue);
}

cond_resched();
}

/* Bin any remaining skbs */
- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
dev_kfree_skb(skb);

return 0;
@@ -1930,22 +1926,22 @@ int xenvif_kthread_guest_rx(void *data)

int xenvif_dealloc_kthread(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;

while (!kthread_should_stop()) {
- wait_event_interruptible(vif->dealloc_wq,
- tx_dealloc_work_todo(vif) ||
+ wait_event_interruptible(queue->dealloc_wq,
+ tx_dealloc_work_todo(queue) ||
kthread_should_stop());
if (kthread_should_stop())
break;

- xenvif_tx_dealloc_action(vif);
+ xenvif_tx_dealloc_action(queue);
cond_resched();
}

/* Unmap anything remaining*/
- if (tx_dealloc_work_todo(vif))
- xenvif_tx_dealloc_action(vif);
+ if (tx_dealloc_work_todo(queue))
+ xenvif_tx_dealloc_action(queue);

return 0;
}