/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_node hash_node;
		struct bpf_htab *htab;
		struct pcpu_freelist_node fnode;
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
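
/* A sketch of one element's in-memory layout (it follows from the
 * elem_size computation in htab_map_alloc() below):
 *
 *	struct htab_elem | key[key_size] | pad to 8 | inline value
 *
 * For per-cpu maps, a void __percpu * pointing at the real value area
 * is stored right after the key (see htab_elem_set_ptr()) instead of
 * an inline value.
 */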

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	vfree(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist, htab->elems,
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}
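
/* The extra elems allocated above act as one spare element per CPU:
 * when a preallocated map is full but the update targets an existing
 * key, alloc_htab_elem() below falls back to this CPU's spare so the
 * in-place replace can still succeed.
 */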

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * the other maps, hence limit it to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* Ensure each CPU's LRU list has at least one element.
		 * While we are at it, give every LRU list the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* Extra elems are used only by plain hash maps: the LRU
		 * map can evict the least recently used element itself,
		 * and a per-cpu map updates an existing value in place,
		 * so neither needs a spare elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	kvfree(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}
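
/* Because n_buckets is a power of two (see htab_map_alloc() above),
 * masking with n_buckets - 1 is equivalent to hash % n_buckets but
 * cheaper. E.g. with n_buckets = 1024 the mask is 0x3ff, so the
 * bucket index is just the low ten bits of the jhash value.
 */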

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}
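
/* Note the cheap l->hash == hash test above: it filters out most
 * non-matching elements in a bucket before the full memcmp() of the
 * key bytes has to run.
 */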

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}
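
/* The pointer returned above is the value area inside the element
 * itself, i.e. the slot after the 8-byte-aligned key in the layout
 * sketched near struct htab_elem. No copy is made, so BPF programs
 * read and write the value in place under RCU.
 */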

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l, *tgt_l;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l == tgt_l) {
			hlist_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
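
/* A minimal userspace iteration sketch over this callback (hypothetical
 * snippet, assuming a bpf_map_get_next_key() wrapper around the
 * BPF_MAP_GET_NEXT_KEY command of the bpf(2) syscall; note that a key
 * which is not found yields the first element, per the code above):
 *
 *	__u32 key = -1, next_key;	// start from a (likely) absent key
 *
 *	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
 *		... look up or delete next_key here ...
 *		key = next_key;
 *	}
 *	// the loop ends with ENOENT once all buckets are exhausted
 */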

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside the SLUB allocator
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}
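
/* In the onallcpus case the user-supplied buffer is laid out as one
 * 8-byte-rounded value per possible CPU, back to back:
 *
 *	value + 0 * round_up(value_size, 8)  -> CPU 0
 *	value + 1 * round_up(value_size, 8)  -> CPU 1
 *	...
 *
 * which is the same layout bpf_percpu_hash_copy() below produces when
 * reading the map from the syscall side.
 */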

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
		if (!l_new)
			err = -E2BIG;
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
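
/* Summary of the update flag semantics enforced above:
 *
 *	BPF_ANY      create a new elem or update an existing one
 *	BPF_NOEXIST  create only; fail with -EEXIST if the key exists
 *	BPF_EXIST    update only; fail with -ENOENT if the key is missing
 */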

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
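
	/* Note that key and value are fully written before the elem is
	 * published via hlist_add_head_rcu() below, so a concurrent RCU
	 * reader can never observe a partially initialized element.
	 */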

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}
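
	/* With BPF_EXIST the update can only modify an existing value in
	 * place (see below), so no LRU node is popped for that case.
	 */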

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so all programs that used this map (there can be more than one)
	 * have been disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __read_mostly = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
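
/* Userspace sketch of reading a per-cpu value through this path
 * (hypothetical snippet; assumes a bpf_map_lookup_elem() wrapper for
 * the BPF_MAP_LOOKUP_ELEM command and a libbpf-style helper for the
 * possible-CPU count):
 *
 *	unsigned int n = num_possible_cpus();
 *	__u64 values[n];	// value_size 8 here, already a multiple of 8
 *
 *	if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
 *		for (unsigned int cpu = 0; cpu < n; cpu++)
 *			printf("cpu%u: %llu\n", cpu, values[cpu]);
 */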

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value,
						    map_flags, true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static const struct bpf_map_ops htab_lru_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
	.ops = &htab_lru_percpu_ops,
	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	bpf_register_map_type(&htab_lru_percpu_type);
	return 0;
}
late_initcall(register_htab_map);
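
/* End-to-end creation sketch from userspace (hypothetical snippet,
 * assuming direct use of the bpf(2) syscall; the attribute values
 * mirror what htab_map_alloc() validates above):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_LRU_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *		.map_flags   = BPF_F_NO_COMMON_LRU,	// per-cpu LRU lists
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// fails with EPERM without CAP_SYS_ADMIN, per the check above
 */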