@@ -17,6 +17,7 @@
 #include <linux/bpf_verifier.h>
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 #include <linux/rtnetlink.h>
 #include <net/pkt_cls.h>
 
@@ -31,6 +32,19 @@ struct nsim_bpf_bound_prog {
 	struct list_head l;
 };
 
+#define NSIM_BPF_MAX_KEYS	2
+
+struct nsim_bpf_bound_map {
+	struct netdevsim *ns;
+	struct bpf_offloaded_map *map;
+	struct mutex mutex;
+	struct nsim_map_entry {
+		void *key;
+		void *value;
+	} entry[NSIM_BPF_MAX_KEYS];
+	struct list_head l;
+};
+
 static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
 {
 	const char **str = file->private;
@@ -284,6 +298,224 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
 	return 0;
 }
 
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+	return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+		if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+			return i;
+
+	return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+	nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+	if (!nmap->entry[idx].key)
+		return -ENOMEM;
+	nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+	if (!nmap->entry[idx].value) {
+		kfree(nmap->entry[idx].key);
+		nmap->entry[idx].key = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+		      void *key, void *next_key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx = -ENOENT;
+
+	mutex_lock(&nmap->mutex);
+
+	if (key)
+		idx = nsim_map_key_find(offmap, key);
+	if (idx == -ENOENT)
+		idx = 0;
+	else
+		idx++;
+
+	for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+		if (nmap->entry[idx].key) {
+			memcpy(next_key, nmap->entry[idx].key,
+			       offmap->map.key_size);
+			break;
+		}
+	}
+
+	mutex_unlock(&nmap->mutex);
+
+	if (idx == ARRAY_SIZE(nmap->entry))
+		return -ENOENT;
+	return 0;
+}
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx >= 0)
+		memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+	mutex_unlock(&nmap->mutex);
+
+	return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+		     void *key, void *value, u64 flags)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx, err = 0;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx < 0 && flags == BPF_EXIST) {
+		err = idx;
+		goto exit_unlock;
+	}
+	if (idx >= 0 && flags == BPF_NOEXIST) {
+		err = -EEXIST;
+		goto exit_unlock;
+	}
+
+	if (idx < 0) {
+		for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+			if (!nmap->entry[idx].key)
+				break;
+		if (idx == ARRAY_SIZE(nmap->entry)) {
+			err = -E2BIG;
+			goto exit_unlock;
+		}
+
+		err = nsim_map_alloc_elem(offmap, idx);
+		if (err)
+			goto exit_unlock;
+	}
+
+	memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+	memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+	mutex_unlock(&nmap->mutex);
+
+	return err;
+}
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	int idx;
+
+	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+		return -EINVAL;
+
+	mutex_lock(&nmap->mutex);
+
+	idx = nsim_map_key_find(offmap, key);
+	if (idx >= 0) {
+		kfree(nmap->entry[idx].key);
+		kfree(nmap->entry[idx].value);
+		memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+	}
+
+	mutex_unlock(&nmap->mutex);
+
+	return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+	.map_get_next_key	= nsim_map_get_next_key,
+	.map_lookup_elem	= nsim_map_lookup_elem,
+	.map_update_elem	= nsim_map_update_elem,
+	.map_delete_elem	= nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+	struct nsim_bpf_bound_map *nmap;
+	unsigned int i;
+	int err;
+
+	if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+		    offmap->map.map_type != BPF_MAP_TYPE_HASH))
+		return -EINVAL;
+	if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+		return -ENOMEM;
+	if (offmap->map.map_flags)
+		return -EINVAL;
+
+	nmap = kzalloc(sizeof(*nmap), GFP_USER);
+	if (!nmap)
+		return -ENOMEM;
+
+	offmap->dev_priv = nmap;
+	nmap->ns = ns;
+	nmap->map = offmap;
+	mutex_init(&nmap->mutex);
+
+	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+		for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+			u32 *key;
+
+			err = nsim_map_alloc_elem(offmap, i);
+			if (err)
+				goto err_free;
+			key = nmap->entry[i].key;
+			*key = i;
+		}
+	}
+
+	offmap->dev_ops = &nsim_bpf_map_ops;
+	list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+
+	return 0;
+
+err_free:
+	while (i--) {
+		kfree(nmap->entry[i].key);
+		kfree(nmap->entry[i].value);
+	}
+	kfree(nmap);
+	return err;
+}
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+	struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+		kfree(nmap->entry[i].key);
+		kfree(nmap->entry[i].value);
+	}
+	list_del_init(&nmap->l);
+	mutex_destroy(&nmap->mutex);
+	kfree(nmap);
+}
+
 int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 {
 	struct netdevsim *ns = netdev_priv(dev);
@@ -328,6 +560,14 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 			return err;
 
 		return nsim_xdp_set_prog(ns, bpf);
+	case BPF_OFFLOAD_MAP_ALLOC:
+		if (!ns->bpf_map_accept)
+			return -EOPNOTSUPP;
+
+		return nsim_bpf_map_alloc(ns, bpf->offmap);
+	case BPF_OFFLOAD_MAP_FREE:
+		nsim_bpf_map_free(bpf->offmap);
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -336,6 +576,7 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 int nsim_bpf_init(struct netdevsim *ns)
 {
 	INIT_LIST_HEAD(&ns->bpf_bound_progs);
+	INIT_LIST_HEAD(&ns->bpf_bound_maps);
 
 	debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
 			   &ns->bpf_offloaded_id);
@@ -362,12 +603,17 @@ int nsim_bpf_init(struct netdevsim *ns)
 	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
 			    &ns->bpf_xdpoffload_accept);
 
+	ns->bpf_map_accept = true;
+	debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+			    &ns->bpf_map_accept);
+
 	return 0;
 }
 
 void nsim_bpf_uninit(struct netdevsim *ns)
 {
 	WARN_ON(!list_empty(&ns->bpf_bound_progs));
+	WARN_ON(!list_empty(&ns->bpf_bound_maps));
 	WARN_ON(ns->xdp_prog);
 	WARN_ON(ns->bpf_offloaded);
 }