|
@@ -1684,6 +1684,47 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
|
|
.arg4_type = ARG_CONST_SIZE,
|
|
.arg4_type = ARG_CONST_SIZE,
|
|
};
|
|
};
|
|
|
|
|
|
|
|
/* bpf_skb_load_bytes_relative - copy @len bytes from @skb into @to,
 * at @offset relative to either the MAC or the network header
 * (selected by @start_header: BPF_HDR_START_MAC / BPF_HDR_START_NET).
 *
 * Reads are restricted to the skb's linear data area; on any failure
 * the destination buffer is zeroed before returning -EFAULT, so the
 * BPF program never observes uninitialized/stale memory in @to.
 * Returns 0 on success.
 */
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *ptr;

	/* Reject oversized offsets up front and any read longer than the
	 * linear (non-paged) part of the skb.
	 */
	if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = skb_mac_header(skb) + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = skb_network_header(skb) + offset;
		break;
	default:
		/* Unknown start_header value from the BPF program. */
		goto err_clear;
	}

	/* The computed window must lie entirely within
	 * [mac header, tail pointer) of the linear area.
	 */
	if (likely(ptr >= skb_mac_header(skb) &&
		   ptr + len <= skb_tail_pointer(skb))) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	/* Failure contract: clear the caller's buffer, report -EFAULT. */
	memset(to, 0, len);
	return -EFAULT;
}
|
|
|
|
+
|
|
|
|
/* Verifier signature for bpf_skb_load_bytes_relative():
 * arg1 = skb ctx, arg2 = offset, arg3/arg4 = destination buffer plus
 * its constant size (PTR_TO_UNINIT_MEM + CONST_SIZE lets the verifier
 * prove @to can hold @len bytes), arg5 = start_header selector.
 */
static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
|
|
|
|
+
|
|
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
|
|
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
|
|
{
|
|
{
|
|
/* Idea is the following: should the needed direct read/write
|
|
/* Idea is the following: should the needed direct read/write
|
|
@@ -4061,6 +4102,8 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
switch (func_id) {
|
|
switch (func_id) {
|
|
case BPF_FUNC_skb_load_bytes:
|
|
case BPF_FUNC_skb_load_bytes:
|
|
return &bpf_skb_load_bytes_proto;
|
|
return &bpf_skb_load_bytes_proto;
|
|
|
|
+ case BPF_FUNC_skb_load_bytes_relative:
|
|
|
|
+ return &bpf_skb_load_bytes_relative_proto;
|
|
case BPF_FUNC_get_socket_cookie:
|
|
case BPF_FUNC_get_socket_cookie:
|
|
return &bpf_get_socket_cookie_proto;
|
|
return &bpf_get_socket_cookie_proto;
|
|
case BPF_FUNC_get_socket_uid:
|
|
case BPF_FUNC_get_socket_uid:
|
|
@@ -4078,6 +4121,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
return &bpf_skb_store_bytes_proto;
|
|
return &bpf_skb_store_bytes_proto;
|
|
case BPF_FUNC_skb_load_bytes:
|
|
case BPF_FUNC_skb_load_bytes:
|
|
return &bpf_skb_load_bytes_proto;
|
|
return &bpf_skb_load_bytes_proto;
|
|
|
|
+ case BPF_FUNC_skb_load_bytes_relative:
|
|
|
|
+ return &bpf_skb_load_bytes_relative_proto;
|
|
case BPF_FUNC_skb_pull_data:
|
|
case BPF_FUNC_skb_pull_data:
|
|
return &bpf_skb_pull_data_proto;
|
|
return &bpf_skb_pull_data_proto;
|
|
case BPF_FUNC_csum_diff:
|
|
case BPF_FUNC_csum_diff:
|