@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 {
 	int ret;
 	u32 count;
+	int num_pkts;
+	int tx_num_deferred;
 	unsigned long flags;
 	struct vmxnet3_tx_ctx ctx;
 	union Vmxnet3_GenericDesc *gdesc;
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 #else
 	gdesc = ctx.sop_txd;
 #endif
+	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
 	if (ctx.mss) {
 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
 		gdesc->txd.om = VMXNET3_OM_TSO;
 		gdesc->txd.msscof = ctx.mss;
-		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
-			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
+		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
 		}
-		le32_add_cpu(&tq->shared->txNumDeferred, 1);
+		num_pkts = 1;
 	}
+	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
+	tx_num_deferred += num_pkts;

 	if (skb_vlan_tag_present(skb)) {
 		gdesc->txd.ti = 1;
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,

 	spin_unlock_irqrestore(&tq->tx_lock, flags);

-	if (le32_to_cpu(tq->shared->txNumDeferred) >=
-	    le32_to_cpu(tq->shared->txThreshold)) {
+	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
 		VMXNET3_WRITE_BAR0_REG(adapter,
 				       VMXNET3_REG_TXPROD + tq->qid * 8,