@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 
     netif_stop_queue(dev);
     if (!lp->tx_enable)                   /* Cannot send for now */
-        return NETDEV_TX_LOCKED;
+        goto tx_err;
 
     /*
     ** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 
     /* Test if cache is already locked - requeue skb if so */
     if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
-        return NETDEV_TX_LOCKED;
+        goto tx_err;
 
     /* Transmit descriptor ring full or stale skb */
     if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
     lp->cache.lock = 0;
 
     return NETDEV_TX_OK;
+tx_err:
+    dev_kfree_skb_any(skb);
+    return NETDEV_TX_OK;
 }
 
 /*