@@ -23,7 +23,8 @@ struct bpf_cgroup_storage;
 extern struct static_key_false cgroup_bpf_enabled_key;
 #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
 
-DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 
 #define for_each_cgroup_storage_type(stype) \
 	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -115,15 +116,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
 					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
 {
 	enum bpf_cgroup_storage_type stype;
-	struct bpf_storage_buffer *buf;
-
-	for_each_cgroup_storage_type(stype) {
-		if (!storage[stype])
-			continue;
 
-		buf = READ_ONCE(storage[stype]->buf);
-		this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
-	}
+	for_each_cgroup_storage_type(stype)
+		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
 }
 
 struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
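
With this change the per-cpu bpf_cgroup_storage slot carries the struct bpf_cgroup_storage pointer itself rather than a pointer into its data buffer, so resolving the actual map value moves to the reader of the slot. A minimal sketch of that reader side, assuming only the storage->buf / buf->data[] layout implied by the removed lines above (the helper name is illustrative, not a function added by this patch):

/*
 * Illustrative only: how a reader of the per-cpu slot can resolve the
 * map value after this change. It mirrors the READ_ONCE(storage->buf)
 * and &buf->data[0] steps removed from bpf_cgroup_storage_set() above.
 */
static void *cgroup_storage_value(enum bpf_cgroup_storage_type stype)
{
	struct bpf_cgroup_storage *storage;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}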