|
@@ -181,32 +181,60 @@ int bpf_map_precharge_memlock(u32 pages)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int bpf_map_charge_memlock(struct bpf_map *map)
|
|
|
+static int bpf_charge_memlock(struct user_struct *user, u32 pages)
|
|
|
{
|
|
|
- struct user_struct *user = get_current_user();
|
|
|
- unsigned long memlock_limit;
|
|
|
+ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
|
|
|
|
|
|
- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
|
|
|
+ if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
|
|
|
+ atomic_long_sub(pages, &user->locked_vm);
|
|
|
+ return -EPERM;
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+}
|
|
|
|
|
|
- atomic_long_add(map->pages, &user->locked_vm);
|
|
|
/* Return @pages previously charged with bpf_charge_memlock() to
 * @user's locked_vm accounting. Caller must not uncharge more than
 * was charged.
 */
static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	atomic_long_sub(pages, &user->locked_vm);
}
|
|
|
+
|
|
|
+static int bpf_map_init_memlock(struct bpf_map *map)
|
|
|
+{
|
|
|
+ struct user_struct *user = get_current_user();
|
|
|
+ int ret;
|
|
|
|
|
|
- if (atomic_long_read(&user->locked_vm) > memlock_limit) {
|
|
|
- atomic_long_sub(map->pages, &user->locked_vm);
|
|
|
+ ret = bpf_charge_memlock(user, map->pages);
|
|
|
+ if (ret) {
|
|
|
free_uid(user);
|
|
|
- return -EPERM;
|
|
|
+ return ret;
|
|
|
}
|
|
|
map->user = user;
|
|
|
- return 0;
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
-static void bpf_map_uncharge_memlock(struct bpf_map *map)
|
|
|
+static void bpf_map_release_memlock(struct bpf_map *map)
|
|
|
{
|
|
|
struct user_struct *user = map->user;
|
|
|
-
|
|
|
- atomic_long_sub(map->pages, &user->locked_vm);
|
|
|
+ bpf_uncharge_memlock(user, map->pages);
|
|
|
free_uid(user);
|
|
|
}
|
|
|
|
|
|
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ ret = bpf_charge_memlock(map->user, pages);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+ map->pages += pages;
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
/* Give back @pages previously charged via bpf_map_charge_memlock()
 * and shrink map->pages to match.
 *
 * NOTE(review): map->pages is a plain (non-atomic) field here, so
 * concurrent charge/uncharge callers presumably rely on external
 * serialization — confirm against the callers of this API.
 */
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->user, pages);
	map->pages -= pages;
}
|
|
|
+
|
|
|
static int bpf_map_alloc_id(struct bpf_map *map)
|
|
|
{
|
|
|
int id;
|
|
@@ -256,7 +284,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
|
|
|
{
|
|
|
struct bpf_map *map = container_of(work, struct bpf_map, work);
|
|
|
|
|
|
- bpf_map_uncharge_memlock(map);
|
|
|
+ bpf_map_release_memlock(map);
|
|
|
security_bpf_map_free(map);
|
|
|
/* implementation dependent freeing */
|
|
|
map->ops->map_free(map);
|
|
@@ -492,7 +520,7 @@ static int map_create(union bpf_attr *attr)
|
|
|
if (err)
|
|
|
goto free_map_nouncharge;
|
|
|
|
|
|
- err = bpf_map_charge_memlock(map);
|
|
|
+ err = bpf_map_init_memlock(map);
|
|
|
if (err)
|
|
|
goto free_map_sec;
|
|
|
|
|
@@ -515,7 +543,7 @@ static int map_create(union bpf_attr *attr)
|
|
|
return err;
|
|
|
|
|
|
free_map:
|
|
|
- bpf_map_uncharge_memlock(map);
|
|
|
+ bpf_map_release_memlock(map);
|
|
|
free_map_sec:
|
|
|
security_bpf_map_free(map);
|
|
|
free_map_nouncharge:
|