@@ -236,6 +236,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_PAUSE_FLAG_AUTONEG	0x1
 #define FEC_PAUSE_FLAG_ENABLE	0x2
 
+#define COPYBREAK_DEFAULT	256
+
 #define TSO_HEADER_SIZE		128
 /* Max number of allowed TCP segments for software TSO */
 #define FEC_MAX_TSO_SEGS	100
@@ -1322,6 +1324,50 @@ fec_enet_tx(struct net_device *ndev)
 		return;
 }
 
+static int
+fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int off;
+
+	off = ((unsigned long)skb->data) & fep->rx_align;
+	if (off)
+		skb_reserve(skb, fep->rx_align + 1 - off);
+
+	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+					  FEC_ENET_RX_FRSIZE - fep->rx_align,
+					  DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+		if (net_ratelimit())
+			netdev_err(ndev, "Rx DMA memory map failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+			       struct bufdesc *bdp, u32 length)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct sk_buff *new_skb;
+
+	if (length > fep->rx_copybreak)
+		return false;
+
+	new_skb = netdev_alloc_skb(ndev, length);
+	if (!new_skb)
+		return false;
+
+	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_RX_FRSIZE - fep->rx_align,
+				DMA_FROM_DEVICE);
+	memcpy(new_skb->data, (*skb)->data, length);
+	*skb = new_skb;
+
+	return true;
+}
+
 /* During a receive, the cur_rx points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1336,7 +1382,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned short status;
-	struct	sk_buff	*skb;
+	struct	sk_buff *skb_new = NULL;
+	struct	sk_buff *skb;
 	ushort	pkt_len;
 	__u8 *data;
 	int	pkt_received = 0;
@@ -1344,6 +1391,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	bool	vlan_packet_rcvd = false;
 	u16	vlan_tag;
 	int	index = 0;
+	bool	is_copybreak;
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1401,11 +1449,27 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		ndev->stats.rx_bytes += pkt_len;
 
 		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
-		data = rxq->rx_skbuff[index]->data;
-		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
-					FEC_ENET_RX_FRSIZE - fep->rx_align,
-					DMA_FROM_DEVICE);
+		skb = rxq->rx_skbuff[index];
 
+		/* The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+		if (!is_copybreak) {
+			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+			if (unlikely(!skb_new)) {
+				ndev->stats.rx_dropped++;
+				goto rx_processing_done;
+			}
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					 FEC_ENET_RX_FRSIZE - fep->rx_align,
+					 DMA_FROM_DEVICE);
+		}
+
+		prefetch(skb->data - NET_IP_ALIGN);
+		skb_put(skb, pkt_len - 4);
+		data = skb->data;
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(data, pkt_len);
 
@@ -1422,62 +1486,48 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			struct vlan_hdr *vlan_header =
 					(struct vlan_hdr *) (data + ETH_HLEN);
 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-			pkt_len -= VLAN_HLEN;
 
 			vlan_packet_rcvd = true;
+
+			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
+						       data, (2 * ETH_ALEN));
+			skb_pull(skb, VLAN_HLEN);
 		}
 
-		/* This does 16 byte alignment, exactly what we need.
-		 * The packet length includes FCS, but we don't want to
-		 * include that when passing upstream as it messes up
-		 * bridging applications.
-		 */
-		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
+		skb->protocol = eth_type_trans(skb, ndev);
 
-		if (unlikely(!skb)) {
-			ndev->stats.rx_dropped++;
-		} else {
-			int payload_offset = (2 * ETH_ALEN);
-			skb_reserve(skb, NET_IP_ALIGN);
-			skb_put(skb, pkt_len - 4);	/* Make room */
-
-			/* Extract the frame data without the VLAN header. */
-			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
-			if (vlan_packet_rcvd)
-				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
-			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
-						       data + payload_offset,
-						       pkt_len - 4 - (2 * ETH_ALEN));
-
-			skb->protocol = eth_type_trans(skb, ndev);
-
-			/* Get receive timestamp from the skb */
-			if (fep->hwts_rx_en && fep->bufdesc_ex)
-				fec_enet_hwtstamp(fep, ebdp->ts,
-						  skb_hwtstamps(skb));
-
-			if (fep->bufdesc_ex &&
-			    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
-					/* don't check it */
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				} else {
-					skb_checksum_none_assert(skb);
-				}
+		/* Get receive timestamp from the skb */
+		if (fep->hwts_rx_en && fep->bufdesc_ex)
+			fec_enet_hwtstamp(fep, ebdp->ts,
+					  skb_hwtstamps(skb));
+
+		if (fep->bufdesc_ex &&
+		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+				/* don't check it */
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			} else {
+				skb_checksum_none_assert(skb);
 			}
+		}
 
-			/* Handle received VLAN packets */
-			if (vlan_packet_rcvd)
-				__vlan_hwaccel_put_tag(skb,
-						       htons(ETH_P_8021Q),
-						       vlan_tag);
+		/* Handle received VLAN packets */
+		if (vlan_packet_rcvd)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
 
-			napi_gro_receive(&fep->napi, skb);
+		napi_gro_receive(&fep->napi, skb);
+
+		if (is_copybreak) {
+			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+						   FEC_ENET_RX_FRSIZE - fep->rx_align,
+						   DMA_FROM_DEVICE);
+		} else {
+			rxq->rx_skbuff[index] = skb_new;
+			fec_enet_new_rxbdp(ndev, bdp, skb_new);
 		}
 
-		dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
-					   FEC_ENET_RX_FRSIZE - fep->rx_align,
-					   DMA_FROM_DEVICE);
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -2392,6 +2442,44 @@ static void fec_enet_itr_coal_init(struct net_device *ndev)
 	fec_enet_set_coalesce(ndev, &ec);
 }
 
+static int fec_enet_get_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = fep->rx_copybreak;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				const void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		fep->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_settings = fec_enet_get_settings,
 	.set_settings = fec_enet_set_settings,
@@ -2408,6 +2496,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_sset_count = fec_enet_get_sset_count,
 #endif
 	.get_ts_info = fec_enet_get_ts_info,
+	.get_tunable = fec_enet_get_tunable,
+	.set_tunable = fec_enet_set_tunable,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2553,33 +2643,20 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 	struct fec_enet_priv_rx_q *rxq;
-	unsigned int off;
 
 	rxq = fep->rx_queue[queue];
 	bdp = rxq->rx_bd_base;
 	for (i = 0; i < rxq->rx_ring_size; i++) {
-		dma_addr_t addr;
-
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb)
 			goto err_alloc;
 
-		off = ((unsigned long)skb->data) & fep->rx_align;
-		if (off)
-			skb_reserve(skb, fep->rx_align + 1 - off);
-
-		addr = dma_map_single(&fep->pdev->dev, skb->data,
-				      FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE);
-
-		if (dma_mapping_error(&fep->pdev->dev, addr)) {
+		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
 			dev_kfree_skb(skb);
-			if (net_ratelimit())
-				netdev_err(ndev, "Rx DMA memory map failed\n");
 			goto err_alloc;
 		}
 
 		rxq->rx_skbuff[i] = skb;
-		bdp->cbd_bufaddr = addr;
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
 		if (fep->bufdesc_ex) {
@@ -3240,6 +3317,7 @@ fec_probe(struct platform_device *pdev)
 	if (fep->bufdesc_ex && fep->ptp_clock)
 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
+	fep->rx_copybreak = COPYBREAK_DEFAULT;
 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
 
 	return 0;
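
Note on exercising the new tunable (illustration, not part of the patch): the
fec_enet_get_tunable()/fec_enet_set_tunable() callbacks added above are reached
through the generic ETHTOOL_GTUNABLE/ETHTOOL_STUNABLE ioctls. The standalone
userspace sketch below shows one way to read and change rx_copybreak at
runtime; it assumes kernel/libc headers new enough to provide
struct ethtool_tunable and ETHTOOL_RX_COPYBREAK, and the program itself is
hypothetical, written for this note only.

/* rx_copybreak_tune.c - hypothetical example, not part of the patch.
 * Reads the rx-copybreak tunable of an interface and optionally sets it.
 * Build: cc -o rx_copybreak_tune rx_copybreak_tune.c
 * Usage: ./rx_copybreak_tune IFNAME [new-threshold-bytes]
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	/* the u32 payload must directly follow struct ethtool_tunable */
	struct {
		struct ethtool_tunable hdr;
		__u32 value;
	} req;
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s IFNAME [BYTES]\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;

	/* query the current threshold */
	memset(&req, 0, sizeof(req));
	req.hdr.cmd = ETHTOOL_GTUNABLE;
	req.hdr.id = ETHTOOL_RX_COPYBREAK;
	req.hdr.type_id = ETHTOOL_TUNABLE_U32;
	req.hdr.len = sizeof(req.value);
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GTUNABLE");
		close(fd);
		return 1;
	}
	printf("%s: rx-copybreak = %u\n", argv[1], req.value);

	/* optionally set a new threshold (needs CAP_NET_ADMIN) */
	if (argc > 2) {
		req.hdr.cmd = ETHTOOL_STUNABLE;
		req.value = (__u32)strtoul(argv[2], NULL, 0);
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_STUNABLE");
			close(fd);
			return 1;
		}
	}

	close(fd);
	return 0;
}

Recent ethtool binaries expose the same knob directly, e.g.
"ethtool --get-tunable eth0 rx-copybreak" and
"ethtool --set-tunable eth0 rx-copybreak 128" (interface name and value
illustrative).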