Merge branch 'filter-next'

Daniel Borkmann says:

====================
BPF updates

These were still in my queue. Please see individual patches for
details.

I have rebased these on top of current net-next with Andrew's
gcc union fixup [1] applied to avoid dealing with an unnecessary
merge conflict.

 [1] http://patchwork.ozlabs.org/patch/351577/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 11 years ago
parent
commit be65de7174

+ 14 - 0
Documentation/networking/filter.txt

@@ -833,6 +833,20 @@ loops and other CFG validation; second step starts from the first insn and
 descends all possible paths. It simulates execution of every insn and observes
 the state change of registers and stack.
 
+Testing
+-------
+
+Next to the BPF toolchain, the kernel also ships a test module that contains
+various test cases for classic and internal BPF that can be executed against
+the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and
+enabled via Kconfig:
+
+  CONFIG_TEST_BPF=m
+
+After the module has been built and installed, the test suite can be executed
+via insmod or modprobe against 'test_bpf' module. Results of the test cases
+including timings in nsec can be found in the kernel log (dmesg).
+
 Misc
 ----
 

+ 2 - 2
drivers/isdn/i4l/isdn_ppp.c

@@ -634,7 +634,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_IPPP_FILTER
 	case PPPIOCSPASS:
 	{
-		struct sock_fprog fprog;
+		struct sock_fprog_kern fprog;
 		struct sock_filter *code;
 		int err, len = get_filter(argp, &code);
 
@@ -653,7 +653,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 	}
 	case PPPIOCSACTIVE:
 	{
-		struct sock_fprog fprog;
+		struct sock_fprog_kern fprog;
 		struct sock_filter *code;
 		int err, len = get_filter(argp, &code);
 

+ 2 - 2
drivers/net/ppp/ppp_generic.c

@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		err = get_filter(argp, &code);
 		if (err >= 0) {
-			struct sock_fprog fprog = {
+			struct sock_fprog_kern fprog = {
 				.len = err,
 				.filter = code,
 			};
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		err = get_filter(argp, &code);
 		if (err >= 0) {
-			struct sock_fprog fprog = {
+			struct sock_fprog_kern fprog = {
 				.len = err,
 				.filter = code,
 			};

+ 5 - 5
drivers/net/team/team_mode_loadbalance.c

@@ -49,7 +49,7 @@ struct lb_port_mapping {
 struct lb_priv_ex {
 	struct team *team;
 	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
-	struct sock_fprog *orig_fprog;
+	struct sock_fprog_kern *orig_fprog;
 	struct {
 		unsigned int refresh_interval; /* in tenths of second */
 		struct delayed_work refresh_dw;
@@ -241,10 +241,10 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
 	return 0;
 }
 
-static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
 			  const void *data)
 {
-	struct sock_fprog *fprog;
+	struct sock_fprog_kern *fprog;
 	struct sock_filter *filter = (struct sock_filter *) data;
 
 	if (data_len % sizeof(struct sock_filter))
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
 	return 0;
 }
 
-static void __fprog_destroy(struct sock_fprog *fprog)
+static void __fprog_destroy(struct sock_fprog_kern *fprog)
 {
 	kfree(fprog->filter);
 	kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 	struct lb_priv *lb_priv = get_lb_priv(team);
 	struct sk_filter *fp = NULL;
 	struct sk_filter *orig_fp;
-	struct sock_fprog *fprog = NULL;
+	struct sock_fprog_kern *fprog = NULL;
 	int err;
 
 	if (ctx->data.bin_val.len) {

+ 1 - 4
include/linux/filter.h

@@ -37,9 +37,6 @@
 #define BPF_CALL	0x80	/* function call */
 #define BPF_EXIT	0x90	/* function return */
 
-/* Placeholder/dummy for 0 */
-#define BPF_0		0
-
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,
@@ -191,7 +188,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
 		      struct sock_filter_int *new_prog, int *new_len);
 
 int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog *fprog);
+				struct sock_fprog_kern *fprog);
 void sk_unattached_filter_destroy(struct sk_filter *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
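 
With this series, sk_unattached_filter_create() takes a struct sock_fprog_kern whose .filter points at kernel memory, which is why the callers below (team_mode_loadbalance, ptp_classifier, xt_bpf, cls_bpf) drop their (struct sock_filter __user *) casts. For context only, a minimal sketch of such an in-kernel caller; this is not part of the series, example_fp and example_attach are illustrative names, and error handling is trimmed:

#include <linux/kernel.h>
#include <linux/filter.h>

static struct sk_filter *example_fp;

static int example_attach(void)
{
	/* Trivial classic BPF program: return skb->len. */
	static struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	struct sock_fprog_kern prog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};

	/* No __user cast: the program already lives in kernel memory. */
	return sk_unattached_filter_create(&example_fp, &prog);
}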

+ 327 - 155
lib/test_bpf.c

@@ -22,12 +22,14 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 
+/* General test specific settings */
 #define MAX_SUBTESTS	3
+#define MAX_TESTRUNS	10000
 #define MAX_DATA	128
 #define MAX_INSNS	512
 #define MAX_K		0xffffFFFF
 
-/* define few constants used to init test 'skb' */
+/* Few constants used to init test 'skb' */
 #define SKB_TYPE	3
 #define SKB_MARK	0x1234aaaa
 #define SKB_HASH	0x1234aaab
@@ -36,18 +38,29 @@
 #define SKB_DEV_IFINDEX	577
 #define SKB_DEV_TYPE	588
 
-/* redefine REGs to make tests less verbose */
-#define R0 BPF_REG_0
-#define R1 BPF_REG_1
-#define R2 BPF_REG_2
-#define R3 BPF_REG_3
-#define R4 BPF_REG_4
-#define R5 BPF_REG_5
-#define R6 BPF_REG_6
-#define R7 BPF_REG_7
-#define R8 BPF_REG_8
-#define R9 BPF_REG_9
-#define R10 BPF_REG_10
+/* Redefine REGs to make tests less verbose */
+#define R0		BPF_REG_0
+#define R1		BPF_REG_1
+#define R2		BPF_REG_2
+#define R3		BPF_REG_3
+#define R4		BPF_REG_4
+#define R5		BPF_REG_5
+#define R6		BPF_REG_6
+#define R7		BPF_REG_7
+#define R8		BPF_REG_8
+#define R9		BPF_REG_9
+#define R10		BPF_REG_10
+
+/* Flags that can be passed to test cases */
+#define FLAG_NO_DATA		BIT(0)
+#define FLAG_EXPECTED_FAIL	BIT(1)
+
+enum {
+	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
+	INTERNAL = BIT(7),	/* Extended instruction set.  */
+};
+
+#define TEST_TYPE_MASK		(CLASSIC | INTERNAL)
 
 struct bpf_test {
 	const char *descr;
@@ -55,12 +68,7 @@ struct bpf_test {
 		struct sock_filter insns[MAX_INSNS];
 		struct sock_filter_int insns_int[MAX_INSNS];
 	} u;
-	enum {
-		NO_DATA,
-		EXPECTED_FAIL,
-		SKB,
-		SKB_INT
-	} data_type;
+	__u8 aux;
 	__u8 data[MAX_DATA];
 	struct {
 		int data_size;
@@ -84,7 +92,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 10, 20, 30, 40, 50 },
 		{ { 2, 10 }, { 3, 20 }, { 4, 30 } },
 	},
@@ -96,7 +104,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
 		},
-		SKB,
+		CLASSIC,
 		{ 10, 20, 30, 40, 50 },
 		{ { 1, 2 }, { 3, 6 }, { 4, 8 } },
 	},
@@ -111,7 +119,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		0,
+		CLASSIC | FLAG_NO_DATA,
 		{ },
 		{ { 0, 0xfffffffd } }
 	},
@@ -129,7 +137,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		0,
+		CLASSIC | FLAG_NO_DATA,
 		{ },
 		{ { 0, 0x40000001 } }
 	},
@@ -145,7 +153,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		0,
+		CLASSIC | FLAG_NO_DATA,
 		{ },
 		{ { 0, 0x800000ff }, { 1, 0x800000ff } },
 	},
@@ -156,7 +164,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
 			BPF_STMT(BPF_RET | BPF_K, 1)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
 	},
@@ -166,7 +174,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
 			BPF_STMT(BPF_RET | BPF_K, 1)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
 	},
@@ -179,7 +187,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 1, 2, 3 },
 		{ { 1, 0 }, { 2, 3 } },
 	},
@@ -193,7 +201,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 1, 2, 3, 0xff },
 		{ { 1, 1 }, { 3, 3 }, { 4, 0xff } },
 	},
@@ -206,7 +214,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
 		{ { 15, 0 }, { 16, 3 } },
 	},
@@ -220,7 +228,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
 		{ { 14, 0 }, { 15, 1 }, { 17, 3 } },
 	},
@@ -241,7 +249,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_RET | BPF_K, 1),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, 3 }, { 10, 3 } },
 	},
@@ -252,7 +260,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_MARK),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, SKB_MARK}, { 10, SKB_MARK} },
 	},
@@ -263,7 +271,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_RXHASH),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, SKB_HASH}, { 10, SKB_HASH} },
 	},
@@ -274,7 +282,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_QUEUE),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
 	},
@@ -293,7 +301,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_MISC | BPF_TXA, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 10, 20, 30 },
 		{ { 10, ETH_P_IP }, { 100, ETH_P_IP } },
 	},
@@ -304,7 +312,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_VLAN_TAG),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{
 			{ 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
@@ -318,7 +326,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{
 			{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
@@ -332,7 +340,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_IFINDEX),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
 	},
@@ -343,7 +351,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_HATYPE),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
 	},
@@ -358,7 +366,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, 0 }, { 10, 0 } },
 	},
@@ -372,7 +380,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_NLATTR),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
 		{ { 4, 0 }, { 20, 5 } },
 	},
@@ -406,7 +414,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
 		{ { 4, 0 }, { 20, 9 } },
 	},
@@ -425,7 +433,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
 		 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
 		 * id 9737, seq 1, length 64
@@ -446,7 +454,7 @@ static struct bpf_test tests[] = {
 				 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
 	},
@@ -468,7 +476,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
+		CLASSIC,
 		{ },
 		{ { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
 	},
@@ -481,7 +489,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_RET | BPF_K, 1),
 			BPF_STMT(BPF_RET | BPF_K, MAX_K)
 		},
-		SKB,
+		CLASSIC,
 		{ 3, 3, 3, 3, 3 },
 		{ { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
 	},
@@ -494,7 +502,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_RET | BPF_K, 1),
 			BPF_STMT(BPF_RET | BPF_K, MAX_K)
 		},
-		SKB,
+		CLASSIC,
 		{ 4, 4, 4, 3, 3 },
 		{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
 	},
@@ -513,7 +521,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_RET | BPF_K, 40),
 			BPF_STMT(BPF_RET | BPF_K, MAX_K)
 		},
-		SKB,
+		CLASSIC,
 		{ 1, 2, 3, 4, 5 },
 		{ { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
 	},
@@ -545,7 +553,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_RET | BPF_K, 30),
 			BPF_STMT(BPF_RET | BPF_K, MAX_K)
 		},
-		SKB,
+		CLASSIC,
 		{ 0, 0xAA, 0x55, 1 },
 		{ { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
 	},
@@ -577,7 +585,7 @@ static struct bpf_test tests[] = {
 			{ 0x06,  0,  0, 0x0000ffff },
 			{ 0x06,  0,  0, 0x00000000 },
 		},
-		SKB,
+		CLASSIC,
 		/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
 		 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
 		 * seq 1305692979:1305693027, ack 3650467037, win 65535,
@@ -635,7 +643,7 @@ static struct bpf_test tests[] = {
 			{ 0x06,  0,  0, 0x0000ffff },
 			{ 0x06,  0,  0, 0x00000000 },
 		},
-		SKB,
+		CLASSIC,
 		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
 		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
 		  0x08, 0x00,
@@ -654,8 +662,8 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_MISC | BPF_TXA, 0),
 			BPF_STMT(BPF_RET | BPF_A, 0)
 		},
-		SKB,
-		{},
+		CLASSIC,
+		{ },
 		{ {1, 0}, {2, 0} },
 	},
 	{
@@ -670,7 +678,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 0xfffffffd } }
 	},
@@ -686,7 +694,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 1 } }
 	},
@@ -703,7 +711,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 1 } }
 	},
@@ -720,7 +728,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 1 } }
 	},
@@ -882,7 +890,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_MOV, R0, R9),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 2957380 } }
 	},
@@ -1028,7 +1036,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_REG(BPF_MOV, R0, R9),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 2957380 } }
 	},
@@ -1161,7 +1169,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, R0, R9),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 11 } }
 	},
@@ -1227,7 +1235,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 1 } }
 	},
@@ -1289,7 +1297,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_MOV, R0, R2),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, 0x35d97ef2 } }
 	},
@@ -1309,7 +1317,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_IMM(BPF_MOV, R0, -1),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ },
 		{ { 0, -1 } }
 	},
@@ -1326,7 +1334,7 @@ static struct bpf_test tests[] = {
 			BPF_LD_IND(BPF_B, R8, -70),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ 10, 20, 30, 40, 50 },
 		{ { 4, 0 }, { 5, 10 } }
 	},
@@ -1339,7 +1347,7 @@ static struct bpf_test tests[] = {
 			BPF_ALU32_REG(BPF_DIV, R0, R7),
 			BPF_EXIT_INSN(),
 		},
-		SKB_INT,
+		INTERNAL,
 		{ 10, 20, 30, 40, 50 },
 		{ { 3, 0 }, { 4, 0 } }
 	},
@@ -1348,7 +1356,7 @@ static struct bpf_test tests[] = {
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_IMM, 1),
 		},
-		EXPECTED_FAIL,
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ }
 	},
@@ -1358,7 +1366,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
 			BPF_STMT(BPF_RET | BPF_K, 0)
 		},
-		EXPECTED_FAIL,
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ }
 	},
@@ -1369,7 +1377,7 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
 			BPF_STMT(BPF_RET | BPF_K, 0)
 		},
-		EXPECTED_FAIL,
+		CLASSIC | FLAG_EXPECTED_FAIL,
 		{ },
 		{ }
 	},
@@ -1379,26 +1387,109 @@ static struct bpf_test tests[] = {
 			BPF_STMT(BPF_STX, 16),
 			BPF_STMT(BPF_RET | BPF_K, 0)
 		},
-		EXPECTED_FAIL,
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 		{ },
 		{ }
 	},
+	{
+		"JUMPS + HOLES",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+		},
+		CLASSIC,
+		{ 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8, 0x90, 0xe2,
+		  0xba, 0x0a, 0x56, 0xb4, 0x08, 0x00, 0x45, 0x00,
+		  0x00, 0x28, 0x00, 0x00, 0x20, 0x00, 0x40, 0x11,
+		  0x00, 0x00, 0xc0, 0xa8, 0x33, 0x01, 0xc0, 0xa8,
+		  0x33, 0x02, 0xbb, 0xb6, 0xa9, 0xfa, 0x00, 0x14,
+		  0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc },
+		{ { 88, 0x001b } }
+	},
+	{
+		"check: RET X",
+		.u.insns = {
+			BPF_STMT(BPF_RET | BPF_X, 0),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+	},
+	{
+		"check: LDX + RET X",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 42),
+			BPF_STMT(BPF_RET | BPF_X, 0),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+	},
 };
 
-static int get_length(struct sock_filter *fp)
-{
-	int len = 0;
-
-	while (fp->code != 0 || fp->k != 0) {
-		fp++;
-		len++;
-	}
-
-	return len;
-}
+static struct net_device dev;
 
-struct net_device dev;
-struct sk_buff *populate_skb(char *buf, int size)
+static struct sk_buff *populate_skb(char *buf, int size)
 {
 	struct sk_buff *skb;
 
@@ -1410,6 +1501,8 @@ struct sk_buff *populate_skb(char *buf, int size)
 		return NULL;
 
 	memcpy(__skb_put(skb, size), buf, size);
+
+	/* Initialize a fake skb with test pattern. */
 	skb_reset_mac_header(skb);
 	skb->protocol = htons(ETH_P_IP);
 	skb->pkt_type = SKB_TYPE;
@@ -1425,43 +1518,149 @@ struct sk_buff *populate_skb(char *buf, int size)
 	return skb;
 }
 
-static int run_one(struct sk_filter *fp, struct bpf_test *t)
+static void *generate_test_data(struct bpf_test *test, int sub)
 {
-	u64 start, finish, res, cnt = 100000;
-	int err_cnt = 0, err, i, j;
-	u32 ret = 0;
-	void *data;
+	if (test->aux & FLAG_NO_DATA)
+		return NULL;
 
-	for (i = 0; i < MAX_SUBTESTS; i++) {
-		if (t->test[i].data_size == 0 &&
-		    t->test[i].result == 0)
-			break;
-		if (t->data_type == SKB ||
-		    t->data_type == SKB_INT) {
-			data = populate_skb(t->data, t->test[i].data_size);
-			if (!data)
-				return -ENOMEM;
-		} else {
-			data = NULL;
+	/* Test case expects an skb, so populate one. Various
+	 * subtests generate skbs of different sizes based on
+	 * the same data.
+	 */
+	return populate_skb(test->data, test->test[sub].data_size);
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+	if (test->aux & FLAG_NO_DATA)
+		return;
+
+	kfree_skb(data);
+}
+
+static int probe_filter_length(struct sock_filter *fp)
+{
+	int len = 0;
+
+	while (fp->code != 0 || fp->k != 0) {
+		fp++;
+		len++;
+	}
+
+	return len;
+}
+
+static struct sk_filter *generate_filter(int which, int *err)
+{
+	struct sk_filter *fp;
+	struct sock_fprog_kern fprog;
+	unsigned int flen = probe_filter_length(tests[which].u.insns);
+	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+	switch (test_type) {
+	case CLASSIC:
+		fprog.filter = tests[which].u.insns;
+		fprog.len = flen;
+
+		*err = sk_unattached_filter_create(&fp, &fprog);
+		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+			if (*err == -EINVAL) {
+				pr_cont("PASS\n");
+				/* Verifier rejected filter as expected. */
+				*err = 0;
+				return NULL;
+			} else {
+				pr_cont("UNEXPECTED_PASS\n");
+				/* Verifier didn't reject the test that's
+				 * bad enough, just return!
+				 */
+				*err = -EINVAL;
+				return NULL;
+			}
+		}
+		/* We don't expect to fail. */
+		if (*err) {
+			pr_cont("FAIL to attach err=%d len=%d\n",
+				*err, fprog.len);
+			return NULL;
 		}
+		break;
 
-		start = ktime_to_us(ktime_get());
-		for (j = 0; j < cnt; j++)
-			ret = SK_RUN_FILTER(fp, data);
-		finish = ktime_to_us(ktime_get());
+	case INTERNAL:
+		fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+		if (fp == NULL) {
+			pr_cont("UNEXPECTED_FAIL no memory left\n");
+			*err = -ENOMEM;
+			return NULL;
+		}
+
+		fp->len = flen;
+		memcpy(fp->insnsi, tests[which].u.insns_int,
+		       fp->len * sizeof(struct sock_filter_int));
 
-		res = (finish - start) * 1000;
-		do_div(res, cnt);
+		sk_filter_select_runtime(fp);
+		break;
+	}
 
-		err = ret != t->test[i].result;
-		if (!err)
-			pr_cont("%lld ", res);
+	*err = 0;
+	return fp;
+}
 
-		if (t->data_type == SKB || t->data_type == SKB_INT)
-			kfree_skb(data);
+static void release_filter(struct sk_filter *fp, int which)
+{
+	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
-		if (err) {
-			pr_cont("ret %d != %d ", ret, t->test[i].result);
+	switch (test_type) {
+	case CLASSIC:
+		sk_unattached_filter_destroy(fp);
+		break;
+	case INTERNAL:
+		sk_filter_free(fp);
+		break;
+	}
+}
+
+static int __run_one(const struct sk_filter *fp, const void *data,
+		     int runs, u64 *duration)
+{
+	u64 start, finish;
+	int ret, i;
+
+	start = ktime_to_us(ktime_get());
+
+	for (i = 0; i < runs; i++)
+		ret = SK_RUN_FILTER(fp, data);
+
+	finish = ktime_to_us(ktime_get());
+
+	*duration = (finish - start) * 1000ULL;
+	do_div(*duration, runs);
+
+	return ret;
+}
+
+static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+{
+	int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+	for (i = 0; i < MAX_SUBTESTS; i++) {
+		void *data;
+		u64 duration;
+		u32 ret;
+
+		if (test->test[i].data_size == 0 &&
+		    test->test[i].result == 0)
+			break;
+
+		data = generate_test_data(test, i);
+		ret = __run_one(fp, data, runs, &duration);
+		release_test_data(test, data);
+
+		if (ret == test->test[i].result) {
+			pr_cont("%lld ", duration);
+		} else {
+			pr_cont("ret %d != %d ", ret,
+				test->test[i].result);
 			err_cnt++;
 		}
 	}
@@ -1471,65 +1670,37 @@ static int run_one(struct sk_filter *fp, struct bpf_test *t)
 
 static __init int test_bpf(void)
 {
-	struct sk_filter *fp, *fp_ext = NULL;
-	struct sock_fprog fprog;
-	int err, i, err_cnt = 0;
+	int i, err_cnt = 0, pass_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		pr_info("#%d %s ", i, tests[i].descr);
+		struct sk_filter *fp;
+		int err;
 
-		fprog.filter = tests[i].u.insns;
-		fprog.len = get_length(fprog.filter);
+		pr_info("#%d %s ", i, tests[i].descr);
 
-		if (tests[i].data_type == SKB_INT) {
-			fp_ext = kzalloc(4096, GFP_KERNEL);
-			if (!fp_ext)
-				return -ENOMEM;
-			fp = fp_ext;
-			memcpy(fp_ext->insns, tests[i].u.insns_int,
-			       fprog.len * 8);
-			fp->len = fprog.len;
-			sk_filter_select_runtime(fp);
-		} else {
-			err = sk_unattached_filter_create(&fp, &fprog);
-			if (tests[i].data_type == EXPECTED_FAIL) {
-				if (err == -EINVAL) {
-					pr_cont("PASS\n");
-					continue;
-				} else {
-					pr_cont("UNEXPECTED_PASS\n");
-					/* verifier didn't reject the test
-					 * that's bad enough, just return
-					 */
-					return -EINVAL;
-				}
+		fp = generate_filter(i, &err);
+		if (fp == NULL) {
+			if (err == 0) {
+				pass_cnt++;
+				continue;
 			}
-			if (err) {
-				pr_cont("FAIL to attach err=%d len=%d\n",
-					err, fprog.len);
-				return err;
-			}
-		}
 
+			return err;
+		}
 		err = run_one(fp, &tests[i]);
-
-		if (tests[i].data_type != SKB_INT)
-			sk_unattached_filter_destroy(fp);
-		else
-			sk_filter_free(fp);
+		release_filter(fp, i);
 
 		if (err) {
-			pr_cont("FAIL %d\n", err);
+			pr_cont("FAIL (%d times)\n", err);
 			err_cnt++;
 		} else {
 			pr_cont("PASS\n");
+			pass_cnt++;
 		}
 	}
 
-	if (err_cnt)
-		return -EINVAL;
-	else
-		return 0;
+	pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+	return err_cnt ? -EINVAL : 0;
 }
 
 static int __init test_bpf_init(void)
@@ -1543,4 +1714,5 @@ static void __exit test_bpf_exit(void)
 
 module_init(test_bpf_init);
 module_exit(test_bpf_exit);
+
 MODULE_LICENSE("GPL");
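
In the new test_bpf.c layout above, each tests[] entry carries the program (classic .u.insns or internal .u.insns_int), a type-and-flags byte (CLASSIC or INTERNAL, optionally ORed with FLAG_NO_DATA and/or FLAG_EXPECTED_FAIL), optional packet bytes, and up to MAX_SUBTESTS pairs of (data_size, expected result). Purely as a reading aid, and not part of the patch, a minimal classic test case in that format would look like:

	{
		"illustrative: return skb->len",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 10, 20, 30, 40, 50 },	/* test packet payload */
		{ { 5, 5 }, { 3, 3 } },	/* data_size and expected return */
	},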

+ 100 - 95
net/core/filter.c

@@ -160,95 +160,100 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
 	static const void *jumptable[256] = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
-#define DL(A, B, C)	[BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
-		DL(ALU, ADD, X),
-		DL(ALU, ADD, K),
-		DL(ALU, SUB, X),
-		DL(ALU, SUB, K),
-		DL(ALU, AND, X),
-		DL(ALU, AND, K),
-		DL(ALU, OR, X),
-		DL(ALU, OR, K),
-		DL(ALU, LSH, X),
-		DL(ALU, LSH, K),
-		DL(ALU, RSH, X),
-		DL(ALU, RSH, K),
-		DL(ALU, XOR, X),
-		DL(ALU, XOR, K),
-		DL(ALU, MUL, X),
-		DL(ALU, MUL, K),
-		DL(ALU, MOV, X),
-		DL(ALU, MOV, K),
-		DL(ALU, DIV, X),
-		DL(ALU, DIV, K),
-		DL(ALU, MOD, X),
-		DL(ALU, MOD, K),
-		DL(ALU, NEG, 0),
-		DL(ALU, END, TO_BE),
-		DL(ALU, END, TO_LE),
-		DL(ALU64, ADD, X),
-		DL(ALU64, ADD, K),
-		DL(ALU64, SUB, X),
-		DL(ALU64, SUB, K),
-		DL(ALU64, AND, X),
-		DL(ALU64, AND, K),
-		DL(ALU64, OR, X),
-		DL(ALU64, OR, K),
-		DL(ALU64, LSH, X),
-		DL(ALU64, LSH, K),
-		DL(ALU64, RSH, X),
-		DL(ALU64, RSH, K),
-		DL(ALU64, XOR, X),
-		DL(ALU64, XOR, K),
-		DL(ALU64, MUL, X),
-		DL(ALU64, MUL, K),
-		DL(ALU64, MOV, X),
-		DL(ALU64, MOV, K),
-		DL(ALU64, ARSH, X),
-		DL(ALU64, ARSH, K),
-		DL(ALU64, DIV, X),
-		DL(ALU64, DIV, K),
-		DL(ALU64, MOD, X),
-		DL(ALU64, MOD, K),
-		DL(ALU64, NEG, 0),
-		DL(JMP, CALL, 0),
-		DL(JMP, JA, 0),
-		DL(JMP, JEQ, X),
-		DL(JMP, JEQ, K),
-		DL(JMP, JNE, X),
-		DL(JMP, JNE, K),
-		DL(JMP, JGT, X),
-		DL(JMP, JGT, K),
-		DL(JMP, JGE, X),
-		DL(JMP, JGE, K),
-		DL(JMP, JSGT, X),
-		DL(JMP, JSGT, K),
-		DL(JMP, JSGE, X),
-		DL(JMP, JSGE, K),
-		DL(JMP, JSET, X),
-		DL(JMP, JSET, K),
-		DL(JMP, EXIT, 0),
-		DL(STX, MEM, B),
-		DL(STX, MEM, H),
-		DL(STX, MEM, W),
-		DL(STX, MEM, DW),
-		DL(STX, XADD, W),
-		DL(STX, XADD, DW),
-		DL(ST, MEM, B),
-		DL(ST, MEM, H),
-		DL(ST, MEM, W),
-		DL(ST, MEM, DW),
-		DL(LDX, MEM, B),
-		DL(LDX, MEM, H),
-		DL(LDX, MEM, W),
-		DL(LDX, MEM, DW),
-		DL(LD, ABS, W),
-		DL(LD, ABS, H),
-		DL(LD, ABS, B),
-		DL(LD, IND, W),
-		DL(LD, IND, H),
-		DL(LD, IND, B),
-#undef DL
+		/* 32 bit ALU operations */
+		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
+		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
+		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
+		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+		/* 64 bit ALU operations */
+		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+		/* Call instruction */
+		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		/* Jumps */
+		[BPF_JMP | BPF_JA] = &&JMP_JA,
+		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+		/* Program return */
+		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+		/* Store instructions */
+		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+		/* Load instructions */
+		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
 	};
 	};
 	void *ptr;
 	int off;
 	ALU(XOR,  ^)
 	ALU(XOR,  ^)
 	ALU(MUL,  *)
 #undef ALU
-	ALU_NEG_0:
+	ALU_NEG:
 		A = (u32) -A;
 		CONT;
-	ALU64_NEG_0:
+	ALU64_NEG:
 		A = -A;
 		CONT;
 	ALU_MOV_X:
 		CONT;
 		CONT;
 
 	/* CALL */
-	JMP_CALL_0:
+	JMP_CALL:
 		/* Function call scratches BPF_R1-BPF_R5 registers,
 		 * preserves BPF_R6-BPF_R9, and stores return value
 		 * into BPF_R0.
 		CONT;
 		CONT;
 
 	/* JMP */
-	JMP_JA_0:
+	JMP_JA:
 		insn += insn->off;
 		CONT;
 	JMP_JEQ_X:
 			CONT_JMP;
 			CONT_JMP;
 		}
 		CONT;
-	JMP_EXIT_0:
+	JMP_EXIT:
 		return BPF_R0;
 
 	/* STX and ST and LDX*/
  * a negative errno code is returned. On success the return is zero.
  * a negative errno code is returned. On success the return is zero.
  */
 int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog *fprog)
+				struct sock_fprog_kern *fprog)
 {
 	unsigned int fsize = sk_filter_proglen(fprog);
 	struct sk_filter *fp;
+ 1 - 1
net/core/ptp_classifier.c

@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
 		{ 0x16,  0,  0, 0x00000000 },
 		{ 0x16,  0,  0, 0x00000000 },
 		{ 0x06,  0,  0, 0x00000000 },
 	};
-	struct sock_fprog ptp_prog = {
+	struct sock_fprog_kern ptp_prog = {
 		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
 	};
 
+ 3 - 2
net/netfilter/xt_bpf.c

@@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf");
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_bpf_info *info = par->matchinfo;
-	struct sock_fprog program;
+	struct sock_fprog_kern program;
 
 	program.len = info->bpf_program_num_elem;
-	program.filter = (struct sock_filter __user *) info->bpf_program;
+	program.filter = info->bpf_program;
+
 	if (sk_unattached_filter_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
+ 2 - 2
net/sched/cls_bpf.c

@@ -160,7 +160,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 {
 {
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
-	struct sock_fprog tmp;
+	struct sock_fprog_kern tmp;
 	struct sk_filter *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
 	tmp.len = bpf_len;
-	tmp.filter = (struct sock_filter __user *) bpf_ops;
+	tmp.filter = bpf_ops;
 
 	ret = sk_unattached_filter_create(&fp, &tmp);
 	if (ret)