@@ -176,6 +176,15 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return 0;
 }
 
+const struct bpf_func_proto bpf_tail_call_proto = {
+	.func		= NULL,
+	.gpl_only	= false,
+	.ret_type	= RET_VOID,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 /**
  *	__bpf_prog_run - run eBPF program on a given context
  *	@ctx: is the data we are operating on
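
The proto's .func is deliberately NULL: bpf_tail_call() is never dispatched
through __bpf_call_base like other helpers; the verifier rewrites the call
instruction itself (see the next hunk). From a BPF program, though, it is
used like any other helper. A minimal caller sketch, assuming a
BPF_MAP_TYPE_PROG_ARRAY named jmp_table and the helper-stub convention used
in samples/bpf (all names here are illustrative):

	/* helper stub in the style of samples/bpf/bpf_helpers.h */
	static void (*bpf_tail_call)(void *ctx, void *map, int index) =
		(void *) BPF_FUNC_tail_call;

	int entry(struct __sk_buff *skb)
	{
		bpf_tail_call(skb, &jmp_table, 2);
		/* reached only if slot 2 is empty, the index is out of
		 * range, or the tail-call limit was exceeded
		 */
		return 0;
	}
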
@@ -244,6 +253,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
 		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 		/* Call instruction */
 		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
 		/* Jumps */
 		[BPF_JMP | BPF_JA] = &&JMP_JA,
 		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
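
BPF_JMP | BPF_CALL | BPF_X is an internal-only encoding; userspace cannot
submit it directly, since the verifier rejects CALL instructions with
reserved fields set. A hedged sketch of the verifier-side fixup in
kernel/bpf/verifier.c (not part of this section) that retags a
bpf_tail_call() call so the interpreter dispatches to JMP_TAIL_CALL:

	/* sketch only; the exact surrounding verifier code is omitted */
	if (insn->code == (BPF_JMP | BPF_CALL) &&
	    insn->imm == BPF_FUNC_tail_call) {
		insn->imm = 0;		/* imm no longer holds a helper id */
		insn->code = BPF_JMP | BPF_CALL | BPF_X;
	}
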
@@ -286,6 +296,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
 		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
 		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
 	};
+	u32 tail_call_cnt = 0;
 	void *ptr;
 	int off;
 
@@ -431,6 +442,30 @@ select_insn:
 						       BPF_R4, BPF_R5);
 		CONT;
 
+	JMP_TAIL_CALL: {
+		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+		struct bpf_array *array = container_of(map, struct bpf_array, map);
+		struct bpf_prog *prog;
+		u64 index = BPF_R3;
+
+		if (unlikely(index >= array->map.max_entries))
+			goto out;
+
+		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
+			goto out;
+
+		tail_call_cnt++;
+
+		prog = READ_ONCE(array->prog[index]);
+		if (unlikely(!prog))
+			goto out;
+
+		ARG1 = BPF_R1;
+		insn = prog->insnsi;
+		goto select_insn;
+out:
+		CONT;
+	}
 	/* JMP */
 	JMP_JA:
 		insn += insn->off;
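
Note the control flow above: a successful tail call never returns to the
caller. The interpreter resets insn to the target program's first
instruction and keeps going in the same stack frame, and tail_call_cnt (a
local of __bpf_prog_run, declared in the previous hunk) gives the whole
chain one shared budget capped by MAX_TAIL_CALL_CNT (32 in this patch), so
even a self-referencing program terminates. Illustrative sketch, assuming
slot 0 of jmp_table holds this very program:

	int self(struct __sk_buff *skb)
	{
		/* re-enters itself until the interpreter's counter trips,
		 * then the call falls through and we return
		 */
		bpf_tail_call(skb, &jmp_table, 0);
		return 0;
	}
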
@@ -619,6 +654,40 @@ void __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
 }
 
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp)
+{
+	if (array->owner_prog_type) {
+		if (array->owner_prog_type != fp->type)
+			return false;
+		if (array->owner_jited != fp->jited)
+			return false;
+	} else {
+		array->owner_prog_type = fp->type;
+		array->owner_jited = fp->jited;
+	}
+	return true;
+}
+
+static int check_tail_call(const struct bpf_prog *fp)
+{
+	struct bpf_prog_aux *aux = fp->aux;
+	int i;
+
+	for (i = 0; i < aux->used_map_cnt; i++) {
+		struct bpf_array *array;
+		struct bpf_map *map;
+
+		map = aux->used_maps[i];
+		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+			continue;
+		array = container_of(map, struct bpf_array, map);
+		if (!bpf_prog_array_compatible(array, fp))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  *	bpf_prog_select_runtime - select execution runtime for BPF program
  *	@fp: bpf_prog populated with internal BPF program
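
bpf_prog_array_compatible() makes the first program that references a prog
array its owner; every later program must match the owner's type and
JITed-ness, because a tail call jumps straight into the target's image with
the caller's context, so mixing, say, interpreted and JITed programs (or
socket filters and kprobe programs) in one array would be unsound. From
userspace, a prog array is created and filled like any other map: 4-byte
index keys, with a program fd as the value at update time. A raw-syscall
sketch (prog_fd stands in for the fd of an already-loaded program):

	#include <linux/bpf.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	union bpf_attr create = {
		.map_type    = BPF_MAP_TYPE_PROG_ARRAY,
		.key_size    = sizeof(__u32),
		.value_size  = sizeof(__u32),
		.max_entries = 4,
	};
	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &create, sizeof(create));

	__u32 key = 0;
	union bpf_attr update = {
		.map_fd = map_fd,
		.key    = (__u64)(unsigned long)&key,
		.value  = (__u64)(unsigned long)&prog_fd,
		.flags  = BPF_ANY,
	};
	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &update, sizeof(update));
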
@@ -626,7 +695,7 @@ void __weak bpf_int_jit_compile(struct bpf_prog *prog)
  *	try to JIT internal BPF program, if JIT is not available select interpreter
  *	BPF program will be executed via BPF_PROG_RUN() macro
  */
-void bpf_prog_select_runtime(struct bpf_prog *fp)
+int bpf_prog_select_runtime(struct bpf_prog *fp)
 {
 	fp->bpf_func = (void *) __bpf_prog_run;
 
@@ -634,6 +703,8 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
 	bpf_int_jit_compile(fp);
 	/* Lock whole bpf_prog as read-only */
 	bpf_prog_lock_ro(fp);
+
+	return check_tail_call(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
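
With bpf_prog_select_runtime() now returning an error code, callers have to
propagate it. The call site in kernel/bpf/syscall.c (changed elsewhere in
this patch, not shown in this section) ends up looking roughly like:

	/* sketch of the updated bpf_prog_load() call site */
	err = bpf_prog_select_runtime(prog);
	if (err)
		goto free_used_maps;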