@@ -615,19 +615,39 @@ static void free_used_maps(struct bpf_prog_aux *aux)
 	kfree(aux->used_maps);
 }
 
+int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long user_bufs;
+
+	if (user) {
+		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
+		if (user_bufs > memlock_limit) {
+			atomic_long_sub(pages, &user->locked_vm);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+	if (user)
+		atomic_long_sub(pages, &user->locked_vm);
+}
+
 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit;
-
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	int ret;
 
-	atomic_long_add(prog->pages, &user->locked_vm);
-	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
-		atomic_long_sub(prog->pages, &user->locked_vm);
+	ret = __bpf_prog_charge(user, prog->pages);
+	if (ret) {
 		free_uid(user);
-		return -EPERM;
+		return ret;
 	}
+
 	prog->aux->user = user;
 	return 0;
 }
@@ -636,7 +656,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = prog->aux->user;
 
-	atomic_long_sub(prog->pages, &user->locked_vm);
+	__bpf_prog_uncharge(user, prog->pages);
 	free_uid(user);
 }
 