@@ -490,6 +490,9 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
 {
 	struct sk_buff *new_skb;
 
+	if (skb_linearize(skb))
+		return NULL;
+
 	/* Alloc new skb */
 	new_skb = netdev_alloc_skb(dev, skb->len + 4);
 	if (!new_skb)
@@ -515,12 +518,27 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	cbd_t __iomem *bdp;
 	int curidx;
 	u16 sc;
-	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int nr_frags;
 	skb_frag_t *frag;
 	int len;
-
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
-	if (((unsigned long)skb->data) & 0x3) {
+	int is_aligned = 1;
+	int i;
+
+	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+		is_aligned = 0;
+	} else {
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		frag = skb_shinfo(skb)->frags;
+		for (i = 0; i < nr_frags; i++, frag++) {
+			if (!IS_ALIGNED(frag->page_offset, 4)) {
+				is_aligned = 0;
+				break;
+			}
+		}
+	}
+
+	if (!is_aligned) {
 		skb = tx_skb_align_workaround(dev, skb);
 		if (!skb) {
 			/*
@@ -532,6 +550,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #endif
+
 	spin_lock(&fep->tx_lock);
 
 	/*
@@ -539,6 +558,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	bdp = fep->cur_tx;
 
+	nr_frags = skb_shinfo(skb)->nr_frags;
 	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
 		netif_stop_queue(dev);
 		spin_unlock(&fep->tx_lock);