@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update      *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-			__func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
+		if (!skb)
 			continue;
-		}
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
-	}
-
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
+		kfree_skb(skb);
 	}
 
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */
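
For context, here is a minimal standalone sketch (not part of the patch) of the
teardown pattern the release paths above adopt: take an extra page reference
before calling gnttab_end_foreign_access(), so the grant layer can defer
freeing the page if the backend still holds a mapping on it. The tx_slot
struct and release_slot() helper are hypothetical names for illustration
only; the gnttab calls and GNTMAP_readonly flag are the real ones used in
the hunks.

#include <linux/mm.h>
#include <xen/grant_table.h>

#define GRANT_INVALID_REF 0	/* as defined locally in the driver */

/* Hypothetical bookkeeping, mirroring grant_tx_ref[]/grant_tx_page[]. */
struct tx_slot {
	grant_ref_t ref;	/* grant handed to the backend */
	struct page *page;	/* page that grant covers */
};

static void release_slot(struct tx_slot *slot)
{
	if (slot->ref == GRANT_INVALID_REF)
		return;

	/*
	 * Take an extra reference first: if the backend has not yet
	 * unmapped the grant, gnttab_end_foreign_access() queues the
	 * page for deferred freeing and drops this reference later,
	 * so the page is never returned to the allocator while it is
	 * still foreign-mapped.
	 */
	get_page(slot->page);
	gnttab_end_foreign_access(slot->ref, GNTMAP_readonly,
				  (unsigned long)page_address(slot->page));
	slot->ref = GRANT_INVALID_REF;
	slot->page = NULL;
}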