
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/xen-netback/netback.c
	net/core/filter.c

A filter bug fix overlapped some cleanups and a conversion
over to some new insn generation macros.

A xen-netback bug fix overlapped the addition of multi-queue
support.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 11 years ago
parent
commit
f666f87b94

+ 2 - 2
drivers/net/ethernet/amd/hplance.c

@@ -27,9 +27,9 @@
 
 #include "hplance.h"
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, which takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3

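A note on the corrected arithmetic: LANCE_LOG_TX_BUFFERS and LANCE_LOG_RX_BUFFERS are log2 counts, so this driver runs 2 Tx and 8 Rx buffers; at the 1544 bytes per buffer quoted in the comment that is 15440 bytes, leaving 952 of the 16392 bytes for the init block. A stand-alone sketch of the check (the LANCE_LOG_* names are the driver's, the rest is illustrative):

#include <assert.h>
#include <stdio.h>

#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
#define BUF_SIZE  1544   /* per-buffer size quoted in the comment */
#define RAM_BYTES 16392  /* RAM available for init block + buffers */

int main(void)
{
	int tx = 1 << LANCE_LOG_TX_BUFFERS;  /* 2 Tx buffers */
	int rx = 1 << LANCE_LOG_RX_BUFFERS;  /* 8 Rx buffers */
	int used = (tx + rx) * BUF_SIZE;     /* (8 + 2) * 1544 = 15440 */

	printf("buffers use %d of %d bytes, %d left for the init block\n",
	       used, RAM_BYTES, RAM_BYTES - used);
	assert(used <= RAM_BYTES);
	return 0;
}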
+ 3 - 3
drivers/net/ethernet/amd/mvme147.c

@@ -26,9 +26,9 @@
 #include <asm/pgtable.h>
 #include <asm/mvme147hw.h>
 
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
  * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, which takes (8 + 2) * 1544 bytes.
  */
 #define LANCE_LOG_TX_BUFFERS 1
 #define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
 	       dev->dev_addr);
 
 	lp = netdev_priv(dev);
-	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 16K */
+	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 32K */
 	if (!lp->ram) {
 		printk("%s: No memory for LANCE buffers\n", dev->name);
 		free_netdev(dev);

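The comment fix matches the allocation it annotates: __get_dma_pages(GFP_ATOMIC, 3) returns 2^3 = 8 contiguous pages, i.e. 32K with the 4K page size m68k uses, so the old 16K figure was simply wrong. A minimal sketch of the order-to-bytes arithmetic, assuming a 4096-byte page:

#include <stdio.h>

#define PAGE_SIZE 4096UL  /* assumed; m68k's usual page size */

/* Bytes returned by an order-n allocation such as __get_dma_pages(). */
static unsigned long order_to_bytes(unsigned int order)
{
	return PAGE_SIZE << order;  /* 2^order pages */
}

int main(void)
{
	/* order 3 => 8 pages => 32768 bytes, the 32K in the new comment */
	printf("order 3 allocates %lu bytes\n", order_to_bytes(3));
	return 0;
}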
+ 3 - 3
drivers/net/ethernet/freescale/gianfar.c

@@ -1384,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	gfar_hw_init(priv);
 
+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(dev);
+
 	err = register_netdev(dev);
 
 	if (err) {
@@ -1391,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
 		goto register_fail;
 	}
 
-	/* Carrier starts down, phylib will bring it up */
-	netif_carrier_off(dev);
-
 	device_init_wakeup(&dev->dev,
 			   priv->device_flags &
 			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

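The gianfar hunks are purely an ordering fix: register_netdev() makes the interface visible to userspace, so the carrier must already be marked down before that call, or there is a window in which the link looks up before phylib has brought it up for real. A sketch of the resulting probe shape (example_probe and its error handling are illustrative, not gianfar's actual code):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_probe(void)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return -ENOMEM;

	/* Carrier down BEFORE the device becomes visible; phylib
	 * raises it once the link actually comes up. */
	netif_carrier_off(dev);

	err = register_netdev(dev);  /* device is live from here on */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}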
+ 3 - 0
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c

@@ -458,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 	struct qlcnic_cmd_args cmd;
 	int ret = 0;
 
+	memset(&cmd, 0, sizeof(cmd));
 	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
 	if (ret)
 		return ret;
@@ -1463,6 +1464,7 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
 	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
 	int ret;
 
+	memset(&cmd, 0, sizeof(cmd));
 	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
 		return -ENOMEM;
 
@@ -2033,6 +2035,7 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
 	struct qlcnic_cmd_args cmd;
 	int ret;
 
+	memset(&cmd, 0, sizeof(cmd));
 	if (vid == 0)
 		return 0;
 

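All three qlcnic hunks apply the same defensive pattern: struct qlcnic_cmd_args lives on the stack, so its fields are garbage until the alloc helper fills them in, and an error or cleanup path that inspects them can chase junk pointers. Zeroing the struct first gives every path a known starting state. A stand-alone model of the idea (the struct and helper names are stand-ins, not the qlcnic ones):

#include <stdlib.h>
#include <string.h>

struct cmd_args {  /* stand-in for qlcnic_cmd_args */
	void *req_buf;
	void *rsp_buf;
};

static int alloc_cmd_args(struct cmd_args *cmd)
{
	cmd->req_buf = malloc(64);  /* may fail before rsp_buf is touched */
	return cmd->req_buf ? 0 : -1;
}

static void free_cmd_args(struct cmd_args *cmd)
{
	free(cmd->req_buf);  /* free(NULL) is a no-op; stack junk is not */
	free(cmd->rsp_buf);
}

int send_command(void)
{
	struct cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));  /* without this, rsp_buf is junk */
	if (alloc_cmd_args(&cmd))
		return -1;
	/* ... issue the command ... */
	free_cmd_args(&cmd);
	return 0;
}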
+ 25 - 11
drivers/net/xen-netback/netback.c

@@ -168,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
  * adding 'size' bytes to a buffer which currently contains 'offset'
  * bytes.
  */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+				bool full_coalesce)
 {
 	/* simple case: we have completely filled the current buffer. */
 	if (offset == MAX_BUFFER_OFFSET)
@@ -180,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 *     (i)   this frag would fit completely in the next buffer
 	 * and (ii)  there is already some data in the current buffer
 	 * and (iii) this is not the head buffer.
+	 * and (iv)  there is no need to fully utilize the buffers
 	 *
 	 * Where:
 	 * - (i) stops us splitting a frag into two copies
@@ -190,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 *   by (ii) but is explicitly checked because
 	 *   netfront relies on the first buffer being
 	 *   non-empty and can crash otherwise.
+	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+	 *   slots
 	 *
 	 * This means we will effectively linearise small
 	 * frags but do not needlessly split large buffers
@@ -197,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * own buffers as before.
 	 */
 	BUG_ON(size > MAX_BUFFER_OFFSET);
-	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+	    !full_coalesce)
 		return true;
 
 	return false;
@@ -232,6 +237,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 	return meta;
 }
 
+struct xenvif_rx_cb {
+	int meta_slots_used;
+	bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 /*
  * Set up the grant operations for this fragment. If it's a flipping
  * interface, we also set up the unmap request from here.
@@ -266,7 +278,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		if (bytes > size)
 			bytes = size;
 
-		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+		if (start_new_rx_buffer(npo->copy_off,
+					bytes,
+					*head,
+					XENVIF_RX_CB(skb)->full_coalesce)) {
 			/*
 			 * Netfront requires there to be some data in the head
 			 * buffer.
@@ -548,12 +563,6 @@ static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
 	}
 }
 
-struct xenvif_rx_cb {
-	int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
 void xenvif_kick_thread(struct xenvif_queue *queue)
 {
 	wake_up(&queue->wq);
@@ -609,10 +618,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 		/* To avoid the estimate becoming too pessimal for some
 		 * frontends that limit posted rx requests, cap the estimate
-		 * at MAX_SKB_FRAGS.
+		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+		 * the skb into the provided slots.
 		 */
-		if (max_slots_needed > MAX_SKB_FRAGS)
+		if (max_slots_needed > MAX_SKB_FRAGS) {
 			max_slots_needed = MAX_SKB_FRAGS;
+			XENVIF_RX_CB(skb)->full_coalesce = true;
+		} else {
+			XENVIF_RX_CB(skb)->full_coalesce = false;
+		}
 
 		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&

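Moving struct xenvif_rx_cb above xenvif_gop_frag_copy() is what lets that function read the new full_coalesce flag through XENVIF_RX_CB(). The macro itself is the usual skb control-block idiom: a layer that owns an skb overlays its own small struct on the 48-byte skb->cb scratch area. A minimal model of the idiom (the struct layout here is illustrative):

#include <assert.h>
#include <stdbool.h>

struct skb_model {   /* just the part that matters here */
	char cb[48]; /* per-layer scratch space, like skb->cb */
};

struct rx_cb {       /* overlay, like xenvif_rx_cb */
	int meta_slots_used;
	bool full_coalesce;
};

#define RX_CB(skb) ((struct rx_cb *)(skb)->cb)

int main(void)
{
	struct skb_model skb;

	/* The overlay must fit in the scratch area. */
	assert(sizeof(struct rx_cb) <= sizeof(skb.cb));

	RX_CB(&skb)->full_coalesce = true;  /* as xenvif_rx_action() now does */
	return RX_CB(&skb)->full_coalesce ? 0 : 1;
}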
+ 8 - 0
net/core/filter.c

@@ -584,7 +584,11 @@ load_byte:
  * to make sure its still a 3bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
  */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
 #define PKT_TYPE_MAX	7
+#endif
 static unsigned int pkt_type_offset(void)
 {
 	struct sk_buff skb_probe = { .pkt_type = ~0, };
@@ -685,6 +689,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 			return false;
 		insn++;
 		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
+#ifdef __BIG_ENDIAN_BITFIELD
+		insn++;
+		*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
+#endif
 		break;
 
 	case SKF_AD_OFF + SKF_AD_IFINDEX:

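The filter.c fix keys on bitfield layout: skb->pkt_type is a 3-bit bitfield, and on targets with big-endian bitfield order the compiler places it in the top three bits of its byte, so the classic-BPF PKTTYPE extension must mask with 7 << 5 and then shift right by 5; on little-endian a plain mask with 7 suffices. A user-space sketch of the two layouts (__BIG_ENDIAN_BITFIELD is a kernel macro, so define it yourself when building this for such a target):

#include <stdio.h>

struct probe {  /* a 3-bit field at a byte boundary, like skb->pkt_type */
	unsigned char pkt_type:3;
	unsigned char pad:5;
};

int main(void)
{
	struct probe p = { .pkt_type = 5 };
	unsigned char raw = *(unsigned char *)&p;
	unsigned int v;

#ifdef __BIG_ENDIAN_BITFIELD
	v = (raw & (7 << 5)) >> 5;  /* field sits in the top 3 bits */
#else
	v = raw & 7;                /* field sits in the low 3 bits */
#endif
	printf("pkt_type = %u\n", v);
	return v == 5 ? 0 : 1;
}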
+ 4 - 4
net/ipv6/udp.c

@@ -720,15 +720,15 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 				if (inet->inet_dport != rmt_port)
 					continue;
 			}
-			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
-			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
+			if (!ipv6_addr_any(&s->sk_v6_daddr) &&
+			    !ipv6_addr_equal(&s->sk_v6_daddr, rmt_addr))
 				continue;
 
 			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
 				continue;
 
-			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-				if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
+			if (!ipv6_addr_any(&s->sk_v6_rcv_saddr)) {
+				if (!ipv6_addr_equal(&s->sk_v6_rcv_saddr, loc_addr))
 					continue;
 			}
 			if (!inet6_mc_check(s, loc_addr, rmt_addr))
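The udp.c hunks fix a classic wrong-variable bug: udp_v6_mcast_next() walks candidate sockets in s, but the v6 daddr and rcv_saddr checks tested sk, the socket the walk started from, so every candidate was judged by the first socket's addresses. Reduced to its essentials, the corrected loop shape looks like this (the list and field names are trimmed-down stand-ins):

#include <stddef.h>

struct sock_model {
	int port;
	int daddr;  /* 0 means "any", like ipv6_addr_any() */
	struct sock_model *next;
};

/* Find the next match after 'sk'; every check must read the
 * candidate 's', never the starting point 'sk'. */
static struct sock_model *mcast_next(struct sock_model *sk,
				     int rmt_port, int rmt_addr)
{
	struct sock_model *s;

	for (s = sk->next; s; s = s->next) {
		if (s->port != rmt_port)
			continue;
		if (s->daddr && s->daddr != rmt_addr)
			continue;  /* the bug tested sk->daddr here */
		return s;
	}
	return NULL;
}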