@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	int numa_node = bpf_map_attr_numa_node(attr);
+	u32 elem_size, index_mask, max_entries;
+	bool unpriv = !capable(CAP_SYS_ADMIN);
 	struct bpf_array *array;
 	u64 array_size;
-	u32 elem_size;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	elem_size = round_up(attr->value_size, 8);
 
+	max_entries = attr->max_entries;
+	index_mask = roundup_pow_of_two(max_entries) - 1;
+
+	if (unpriv)
+		/* round up array size to nearest power of 2,
+		 * since cpu will speculate within index_mask limits
+		 */
+		max_entries = index_mask + 1;
+
 	array_size = sizeof(*array);
 	if (percpu)
-		array_size += (u64) attr->max_entries * sizeof(void *);
+		array_size += (u64) max_entries * sizeof(void *);
 	else
-		array_size += (u64) attr->max_entries * elem_size;
+		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
 	if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +96,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array = bpf_map_area_alloc(array_size, numa_node);
 	if (!array)
 		return ERR_PTR(-ENOMEM);
+	array->index_mask = index_mask;
+	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
 	array->map.map_type = attr->map_type;
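
For unprivileged callers the allocation is padded to the next power of two and the matching mask is stored in the map. A minimal user-space sketch of that arithmetic (roundup_pow_of_two() is a kernel helper, so a portable stand-in and a hypothetical max_entries value are used here):

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's roundup_pow_of_two() helper. */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t max_entries = 5;	/* hypothetical attr->max_entries */
	uint32_t index_mask = roundup_pow_of_two32(max_entries) - 1;	/* 0x7 */

	/* An unprivileged map is sized to index_mask + 1 (here 8) entries, so
	 * any index ANDed with the mask stays inside the allocation. */
	printf("index_mask=%u padded_max_entries=%u\n", index_mask, index_mask + 1);
	return 0;
}
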
@@ -121,12 +133,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * index;
+	return array->value + array->elem_size * (index & array->index_mask);
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
 	u32 elem_size = round_up(map->value_size, 8);
 	const int ret = BPF_REG_0;
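
The plain C lookup keeps its bounds check but now also masks the index. The rationale matches the comment added in the allocation path: the CPU may speculatively execute the load even when the `index >= max_entries` branch is ultimately taken, and because unprivileged arrays are padded to a power-of-two number of entries, `index & index_mask` can never address memory outside the allocation. This is the bounds-check-bypass (Spectre variant 1) hardening for array maps; the next hunk applies the same clamp to the instruction sequence emitted for inlined lookups in BPF programs.
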
@@ -135,7 +148,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+	if (map->unpriv_array) {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+	} else {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+	}
 
 	if (is_power_of_2(elem_size)) {
 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
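
The only functional change to the emitted sequence is the extra BPF_AND on the index for unprivileged maps. Since that adds one instruction between the bounds check and the rest of the lookup, the BPF_JGE offset grows from 3 to 4 (and from 5 to 6 in array_of_map_gen_lookup below) so that an out-of-range index still skips past the pointer computation to the NULL return. A rough C equivalent of the unprivileged sequence, with illustrative names rather than kernel code:

#include <stdint.h>
#include <stddef.h>

/* Sketch of what the inlined instructions compute for an unprivileged map. */
void *masked_array_lookup(char *value_base, uint32_t index,
			  uint32_t max_entries, uint32_t index_mask,
			  uint32_t elem_size)
{
	if (index >= max_entries)		/* BPF_JGE, offset now 4 */
		return NULL;
	index &= index_mask;			/* the added BPF_ALU32_IMM(BPF_AND, ...) */
	return value_base + (uint64_t)index * elem_size;	/* shift or multiply, then add */
}
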
@@ -157,7 +175,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return this_cpu_ptr(array->pptrs[index]);
+	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +195,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 		off += size;
@@ -225,10 +243,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -EEXIST;
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
-		memcpy(this_cpu_ptr(array->pptrs[index]),
+		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
 		       value, map->value_size);
 	else
-		memcpy(array->value + array->elem_size * index,
+		memcpy(array->value +
+		       array->elem_size * (index & array->index_mask),
 		       value, map->value_size);
 	return 0;
 }
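
The same `index & array->index_mask` clamp is applied consistently across the other access paths touched by this patch: the per-CPU lookup and copy above, both branches of the element update here, and the per-CPU update that follows, so none of these paths dereferences an unmasked index.
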
@@ -262,7 +281,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
 		off += size;
@@ -613,6 +632,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 static u32 array_of_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 elem_size = round_up(map->value_size, 8);
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
@@ -621,7 +641,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	if (map->unpriv_array) {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+	} else {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	}
 	if (is_power_of_2(elem_size))
 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 	else