@@ -1127,7 +1127,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sh_eth_txdesc *txdesc = NULL;
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
-	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
 	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
-		/* The size of the buffer is a multiple of 16 bytes. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+		/* The size of the buffer is a multiple of 32 bytes. */
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
 		dma_addr = dma_map_single(&ndev->dev, skb->data,
 					  rxdesc->buffer_length,
 					  DMA_FROM_DEVICE);
@@ -1450,7 +1450,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
-	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
 	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
@@ -1506,7 +1506,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
 			dma_unmap_single(&ndev->dev, rxdesc->addr,
-					 ALIGN(mdp->rx_buf_sz, 16),
+					 ALIGN(mdp->rx_buf_sz, 32),
 					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
@@ -1524,8 +1524,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
 		entry = mdp->dirty_rx % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
-		/* The size of the buffer is 16 byte boundary. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+		/* The size of the buffer is 32 byte boundary. */
+		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);