@@ -27,6 +27,8 @@
 DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
+static DEFINE_IDR(map_idr);
+static DEFINE_SPINLOCK(map_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
 	free_uid(user);
 }
 
+static int bpf_map_alloc_id(struct bpf_map *map)
+{
+	int id;
+
+	spin_lock_bh(&map_idr_lock);
+	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		map->id = id;
+	spin_unlock_bh(&map_idr_lock);
+
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_map_free_id(struct bpf_map *map)
+{
+	spin_lock_bh(&map_idr_lock);
+	idr_remove(&map_idr, map->id);
+	spin_unlock_bh(&map_idr_lock);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic_dec_and_test(&map->refcnt)) {
+		bpf_map_free_id(map);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		schedule_work(&map->work);
 	}
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		goto free_map_nouncharge;
 
+	err = bpf_map_alloc_id(map);
+	if (err)
+		goto free_map;
+
 	err = bpf_map_new_fd(map);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_map;
+		goto free_id;
 
 	trace_bpf_map_create(map, err);
 	return err;
 
+free_id:
+	bpf_map_free_id(map);
 free_map:
 	bpf_map_uncharge_memlock(map);
 free_map_nouncharge:
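
The error handling in bpf_map_alloc_id() above leans on idr_alloc_cyclic()'s return convention: on success it returns the newly allocated ID (always >= 1 here, since the requested range starts at 1), and on failure a negative errno, so a return of 0 should be impossible; WARN_ON_ONCE(!id) guards that invariant, and "id > 0 ? 0 : id" collapses any success onto 0 while passing errors through unchanged. Below is a minimal userspace sketch of the same convention, not part of the patch: fake_idr_alloc_cyclic() and its array backing are illustrative stand-ins for the kernel's IDR.

#include <errno.h>
#include <stdio.h>

#define MAX_IDS 8

static void *slots[MAX_IDS + 1];	/* index 0 unused: IDs start at 1 */
static int next_id = 1;			/* cyclic cursor, as in idr_alloc_cyclic() */

/* Stand-in for idr_alloc_cyclic(): returns an ID >= 1 on success,
 * or -ENOSPC when every ID is taken. It never returns 0.
 */
static int fake_idr_alloc_cyclic(void *ptr)
{
	for (int tries = 0; tries < MAX_IDS; tries++) {
		int id = next_id;

		next_id = (next_id % MAX_IDS) + 1;	/* wrap back to 1, never 0 */
		if (!slots[id]) {
			slots[id] = ptr;
			return id;
		}
	}
	return -ENOSPC;
}

/* Same contract as bpf_map_alloc_id(): 0 on success (ID stored in *out_id),
 * negative errno on failure.
 */
static int alloc_id(void *obj, int *out_id)
{
	int id = fake_idr_alloc_cyclic(obj);

	if (id > 0)
		*out_id = id;
	return id > 0 ? 0 : id;
}

int main(void)
{
	int dummy, id;

	/* One allocation more than the ID space holds: the last one fails. */
	for (int i = 0; i <= MAX_IDS; i++) {
		int err = alloc_id(&dummy, &id);

		if (err)
			printf("allocation %d failed: %d\n", i, err);
		else
			printf("allocation %d got id %d\n", i, id);
	}
	return 0;
}

The cyclic cursor also illustrates why the patch reaches for idr_alloc_cyclic() rather than idr_alloc(): map IDs become user-visible handles, and advancing through the range makes immediate reuse of a just-freed ID less likely.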