@@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size;

- size = sizeof(struct e1000_buffer) * txdr->count;
+ size = sizeof(struct e1000_tx_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info)
return -ENOMEM;
@@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;

- size = sizeof(struct e1000_buffer) * rxdr->count;
+ size = sizeof(struct e1000_rx_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info)
return -ENOMEM;
@@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

-static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_buffer *buffer_info)
+static void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_tx_buffer *buffer_info)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
struct e1000_hw *hw = &adapter->hw;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned long size;
unsigned int i;

@@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
}

netdev_reset_queue(adapter->netdev);
- size = sizeof(struct e1000_buffer) * tx_ring->count;
+ size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);

/* Zero out the descriptor ring */
@@ -2053,6 +2054,28 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

+#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+static unsigned int e1000_frag_len(const struct e1000_adapter *a)
+{
+ return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static void *e1000_alloc_frag(const struct e1000_adapter *a)
+{
+ unsigned int len = e1000_frag_len(a);
+ u8 *data = netdev_alloc_frag(len);
+
+ if (likely(data))
+ data += E1000_HEADROOM;
+ return data;
+}
+
+static void e1000_free_frag(const void *data)
+{
+ put_page(virt_to_head_page(data));
+}
+
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
@@ -2062,44 +2085,42 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
struct e1000_hw *hw = &adapter->hw;
- struct e1000_buffer *buffer_info;
+ struct e1000_rx_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;

- /* Free all the Rx ring sk_buffs */
+ /* Free all the Rx netfrags */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma &&
- adapter->clean_rx == e1000_clean_rx_irq) {
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- } else if (buffer_info->dma &&
- adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- dma_unmap_page(&pdev->dev, buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
+ if (adapter->clean_rx == e1000_clean_rx_irq) {
+ if (buffer_info->dma)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (buffer_info->rxbuf.data) {
+ e1000_free_frag(buffer_info->rxbuf.data);
+ buffer_info->rxbuf.data = NULL;
+ }
+ } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+ if (buffer_info->dma)
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (buffer_info->rxbuf.page) {
+ put_page(buffer_info->rxbuf.page);
+ buffer_info->rxbuf.page = NULL;
+ }
}

buffer_info->dma = 0;
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
}

/* there also may be some cached data from a chained receive */
- if (rx_ring->rx_skb_top) {
- dev_kfree_skb(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- }
+ napi_free_frags(&adapter->napi);
+ rx_ring->rx_skb_top = NULL;

- size = sizeof(struct e1000_buffer) * rx_ring->count;
+ size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);

/* Zero out the descriptor ring */
@@ -2678,7 +2699,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
@@ -2750,7 +2771,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
@@ -2809,7 +2830,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
unsigned int f, bytecount, segs;
@@ -2955,7 +2976,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc = NULL;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;

@@ -3373,7 +3394,7 @@ static void e1000_dump(struct e1000_adapter *adapter)

for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
- struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
+ struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)tx_desc;
const char *type;
@@ -3415,7 +3436,7 @@ rx_ring_summary:

for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
- struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
+ struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)rx_desc;
const char *type;
@@ -3429,7 +3450,7 @@ rx_ring_summary:

pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
i, le64_to_cpu(u->a), le64_to_cpu(u->b),
- (u64)buffer_info->dma, buffer_info->skb, type);
+ (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
} /* for */

/* dump the descriptor caches */
@@ -3811,7 +3832,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes=0, total_tx_packets=0;
@@ -3949,12 +3970,12 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
}

/**
- * e1000_consume_page - helper function
+ * e1000_consume_page - helper function for jumbo Rx path
**/
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
{
- bi->page = NULL;
+ bi->rxbuf.page = NULL;
skb->len += length;
skb->data_len += length;
skb->truesize += PAGE_SIZE;
@@ -3980,6 +4001,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
napi_gro_receive(&adapter->napi, skb);
}

+/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats,
+ u32 frame_len, const u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if (is_broadcast_ether_addr(mac_addr))
+ stats->bprc++;
+ else if (is_multicast_ether_addr(mac_addr))
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
+ u8 status, u8 errors,
+ u32 length, const u8 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 last_byte = *(data + length - 1);
+
+ if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&adapter->stats_lock, irq_flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
+ spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
+ unsigned int bufsz)
+{
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+
+ if (unlikely(!skb))
+ adapter->alloc_rx_buff_failed++;
+ return skb;
+}
+
/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
@@ -3994,12 +4122,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- unsigned long irq_flags;
+ struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
@@ -4020,8 +4146,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */

status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;

if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4032,7 +4156,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
dma_unmap_page(&pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_FROM_DEVICE);
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;

length = le16_to_cpu(rx_desc->length);
@@ -4040,25 +4164,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* errors is only valid for DD + EOP descriptors */
if (unlikely((status & E1000_RXD_STAT_EOP) &&
(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
- u8 *mapped;
- u8 last_byte;
-
- mapped = page_address(buffer_info->page);
- last_byte = *(mapped + length - 1);
- if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
- last_byte)) {
- spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
- e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, mapped);
- spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ u8 *mapped = page_address(buffer_info->rxbuf.page);
+
+ if (e1000_tbi_should_accept(adapter, status,
+ rx_desc->errors,
+ length, mapped)) {
length--;
+ } else if (netdev->features & NETIF_F_RXALL) {
+ goto process_skb;
} else {
- if (netdev->features & NETIF_F_RXALL)
- goto process_skb;
- /* recycle both page and skb */
- buffer_info->skb = skb;
/* an error means any chain goes out the window
* too
*/
@@ -4075,16 +4189,18 @@ process_skb:
/* this descriptor is only the beginning (or middle) */
if (!rxtop) {
/* this is the beginning of a chain */
- rxtop = skb;
- skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ rxtop = napi_get_frags(&adapter->napi);
+ if (!rxtop)
+ break;
+
+ skb_fill_page_desc(rxtop, 0,
+ buffer_info->rxbuf.page,
0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the skb, only consumed the page */
- buffer_info->skb = skb;
+ buffer_info->rxbuf.page, 0, length);
}
e1000_consume_page(buffer_info, rxtop, length);
goto next_desc;
@@ -4093,32 +4209,51 @@ process_skb:
/* end of the chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the current skb, we only consumed the
- * page
- */
- buffer_info->skb = skb;
+ buffer_info->rxbuf.page, 0, length);
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
+ struct page *p;
/* no chain, got EOP, this buf is the packet
* copybreak to save the put_page/alloc_page
*/
- if (length <= copybreak &&
- skb_tailroom(skb) >= length) {
+ p = buffer_info->rxbuf.page;
+ if (length <= copybreak) {
u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page);
+
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ length -= 4;
+ skb = e1000_alloc_rx_skb(adapter,
+ length);
+ if (!skb)
+ break;
+
+ vaddr = kmap_atomic(p);
memcpy(skb_tail_pointer(skb), vaddr,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page
+ * buffer_info->rxbuf.page
*/
skb_put(skb, length);
+ e1000_rx_checksum(adapter,
+ status | rx_desc->errors << 24,
+ le16_to_cpu(rx_desc->csum), skb);
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ e1000_receive_skb(adapter, status,
+ rx_desc->special, skb);
+ goto next_desc;
} else {
- skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
+ skb = napi_get_frags(&adapter->napi);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ skb_fill_page_desc(skb, 0, p, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
@@ -4137,14 +4272,14 @@ process_skb:
pskb_trim(skb, skb->len - 4);
total_rx_packets++;

- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- e_err(drv, "pskb_may_pull failed.\n");
- dev_kfree_skb(skb);
- goto next_desc;
+ if (status & E1000_RXD_STAT_VP) {
+ __le16 vlan = rx_desc->special;
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}

- e1000_receive_skb(adapter, status, rx_desc->special, skb);
+ napi_gro_frags(&adapter->napi);

next_desc:
rx_desc->status = 0;
@@ -4175,25 +4310,25 @@ next_desc:
/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
-static void e1000_check_copybreak(struct net_device *netdev,
- struct e1000_buffer *buffer_info,
- u32 length, struct sk_buff **skb)
+static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
+ struct e1000_rx_buffer *buffer_info,
+ u32 length, const void *data)
{
- struct sk_buff *new_skb;
+ struct sk_buff *skb;

if (length > copybreak)
- return;
+ return NULL;

- new_skb = netdev_alloc_skb_ip_align(netdev, length);
- if (!new_skb)
- return;
+ skb = e1000_alloc_rx_skb(adapter, length);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
+ length, DMA_FROM_DEVICE);

- skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
- (*skb)->data - NET_IP_ALIGN,
- length + NET_IP_ALIGN);
- /* save the skb in buffer_info as good */
- buffer_info->skb = *skb;
- *skb = new_skb;
+ memcpy(skb_put(skb, length), data, length);
+
+ return skb;
}

/**
@@ -4207,12 +4342,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- unsigned long flags;
+ struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
@@ -4225,6 +4358,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,

while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
+ u8 *data;
u8 status;

if (*work_done >= work_to_do)
@@ -4233,10 +4367,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */

status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
+ length = le16_to_cpu(rx_desc->length);
+
+ data = buffer_info->rxbuf.data;
+ prefetch(data);
+ skb = e1000_copybreak(adapter, buffer_info, length, data);
+ if (!skb) {
+ unsigned int frag_len = e1000_frag_len(adapter);
+
+ skb = build_skb(data - E1000_HEADROOM, frag_len);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }

- prefetch(skb->data - NET_IP_ALIGN);
+ skb_reserve(skb, E1000_HEADROOM);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ buffer_info->rxbuf.data = NULL;
+ }

if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4246,11 +4397,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,

cleaned = true;
cleaned_count++;
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_FROM_DEVICE);
- buffer_info->dma = 0;

- length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
* packet, if thats the case we need to toss it. In fact, we
* to toss every packet with the EOP bit clear and the next
@@ -4262,29 +4409,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,

if (adapter->discarding) {
/* All receives must fit into a single buffer */
- e_dbg("Receive packet consumed multiple buffers\n");
- /* recycle */
- buffer_info->skb = skb;
+ netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
+ dev_kfree_skb(skb);
if (status & E1000_RXD_STAT_EOP)
adapter->discarding = false;
goto next_desc;
}

if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
- u8 last_byte = *(skb->data + length - 1);
- if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
- last_byte)) {
- spin_lock_irqsave(&adapter->stats_lock, flags);
- e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
- spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ if (e1000_tbi_should_accept(adapter, status,
+ rx_desc->errors,
+ length, data)) {
length--;
+ } else if (netdev->features & NETIF_F_RXALL) {
+ goto process_skb;
} else {
- if (netdev->features & NETIF_F_RXALL)
- goto process_skb;
- /* recycle */
- buffer_info->skb = skb;
+ dev_kfree_skb(skb);
goto next_desc;
}
}
@@ -4299,9 +4439,10 @@ process_skb:
*/
length -= 4;

- e1000_check_copybreak(netdev, buffer_info, length, &skb);
-
- skb_put(skb, length);
+ if (buffer_info->rxbuf.data == NULL)
+ skb_put(skb, length);
+ else /* copybreak skb */
+ skb_trim(skb, length);

/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
@@ -4347,38 +4488,19 @@ static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, int cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
+ struct e1000_rx_buffer *buffer_info;
unsigned int i;
- unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;

i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];

while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto check_page;
- }
-
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-check_page:
/* allocate a new page if necessary */
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
- if (unlikely(!buffer_info->page)) {
+ if (!buffer_info->rxbuf.page) {
+ buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!buffer_info->rxbuf.page)) {
adapter->alloc_rx_buff_failed++;
break;
}
@@ -4386,17 +4508,15 @@ check_page:

if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- buffer_info->length,
+ buffer_info->rxbuf.page, 0,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- put_page(buffer_info->page);
- dev_kfree_skb(skb);
- buffer_info->page = NULL;
- buffer_info->skb = NULL;
+ put_page(buffer_info->rxbuf.page);
+ buffer_info->rxbuf.page = NULL;
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
}

@@ -4432,11 +4552,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
int cleaned_count)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
+ struct e1000_rx_buffer *buffer_info;
unsigned int i;
unsigned int bufsz = adapter->rx_buffer_len;

@@ -4444,57 +4562,52 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];

while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto map_skb;
- }
+ void *data;

- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (unlikely(!skb)) {
+ if (buffer_info->rxbuf.data)
+ goto skip;
+
+ data = e1000_alloc_frag(adapter);
+ if (!data) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}

/* Fix for errata 23, can't cross 64kB boundary */
- if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
- struct sk_buff *oldskb = skb;
+ if (!e1000_check_64k_bound(adapter, data, bufsz)) {
+ void *olddata = data;
e_err(rx_err, "skb align check failed: %u bytes at "
- "%p\n", bufsz, skb->data);
+ "%p\n", bufsz, data);
/* Try again, without freeing the previous */
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
- if (!skb) {
- dev_kfree_skb(oldskb);
+ if (!data) {
+ e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}

- if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
- dev_kfree_skb(skb);
- dev_kfree_skb(oldskb);
+ e1000_free_frag(data);
+ e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}

/* Use new allocation */
- dev_kfree_skb(oldskb);
+ e1000_free_frag(olddata);
}
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-map_skb:
buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
+ data,
- buffer_info->length,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_kfree_skb(skb);
- buffer_info->skb = NULL;
+ e1000_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}

/* XXX if it was allocated cleanly it will never map to a
@@ -4508,17 +4621,20 @@ map_skb:
e_err(rx_err, "dma align check failed: %u bytes at "
"%p\n", adapter->rx_buffer_len,
(void *)(unsigned long)buffer_info->dma);
- dev_kfree_skb(skb);
- buffer_info->skb = NULL;

dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+
+ e1000_free_frag(data);
+ buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;

adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
+ buffer_info->rxbuf.data = data;
+ skip:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
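
A short note on the sizing scheme the new rx path relies on: each fragment returned by e1000_alloc_frag() must hold the headroom, the hardware receive buffer, and the skb_shared_info that build_skb() places at the tail, which is exactly what e1000_frag_len() adds up. The userspace sketch below only mirrors that arithmetic under stated assumptions; ALIGN_TO, CACHE_BYTES, HEADROOM and SHARED_INFO are stand-ins for the kernel's SKB_DATA_ALIGN(), SMP_CACHE_BYTES, NET_SKB_PAD + NET_IP_ALIGN and sizeof(struct skb_shared_info), whose real values are architecture and config dependent.

/*
 * Illustrative userspace sketch of the rx frag sizing; the constants are
 * assumed stand-ins, not the kernel's actual values.
 */
#include <stdio.h>

#define ALIGN_TO(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define CACHE_BYTES     64        /* stand-in for SMP_CACHE_BYTES */
#define HEADROOM        (64 + 2)  /* stand-in for NET_SKB_PAD + NET_IP_ALIGN */
#define SHARED_INFO     320       /* stand-in for sizeof(struct skb_shared_info) */

/* mirrors e1000_frag_len(): headroom plus rx buffer, plus the shared-info
 * tail, each rounded up the way SKB_DATA_ALIGN() would round them */
static unsigned int frag_len(unsigned int rx_buffer_len)
{
	return ALIGN_TO(rx_buffer_len + HEADROOM, CACHE_BYTES) +
	       ALIGN_TO(SHARED_INFO, CACHE_BYTES);
}

int main(void)
{
	unsigned int rx_buffer_len = 2048;	/* typical 1500-MTU rx buffer */

	printf("frag_len(%u) = %u bytes per netdev_alloc_frag()\n",
	       rx_buffer_len, frag_len(rx_buffer_len));
	return 0;
}

This is why the non-copybreak receive path hands (data - E1000_HEADROOM, e1000_frag_len(adapter)) to build_skb() and then calls skb_reserve(skb, E1000_HEADROOM): the DMA mapping starts right at the packet data, while the stack still gets its usual headroom and shared-info tail.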