|
|
@@ -1896,7 +1896,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
|
|
|
e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
|
|
|
if (!e) {
|
|
|
err = -ENOMEM;
|
|
|
- goto out_progs;
|
|
|
+ goto out_free;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -2342,7 +2342,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|
|
if (err)
|
|
|
goto err;
|
|
|
|
|
|
- /* bpf_map_update_elem() can be called in_irq() */
|
|
|
+ /* psock is valid here: otherwise the *ctx_update_elem call above
|
|
|
+ * would have returned an error, so it is safe to skip the NULL check.
|
|
|
+ */
|
|
|
+ psock = smap_psock_sk(sock);
|
|
|
raw_spin_lock_bh(&b->lock);
|
|
|
l_old = lookup_elem_raw(head, hash, key, key_size);
|
|
|
if (l_old && map_flags == BPF_NOEXIST) {
|
|
|
@@ -2360,12 +2363,6 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|
|
goto bucket_err;
|
|
|
}
|
|
|
|
|
|
- psock = smap_psock_sk(sock);
|
|
|
- if (unlikely(!psock)) {
|
|
|
- err = -EINVAL;
|
|
|
- goto bucket_err;
|
|
|
- }
|
|
|
-
|
|
|
rcu_assign_pointer(e->hash_link, l_new);
|
|
|
rcu_assign_pointer(e->htab,
|
|
|
container_of(map, struct bpf_htab, map));
|
|
|
@@ -2388,12 +2385,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
|
|
|
raw_spin_unlock_bh(&b->lock);
|
|
|
return 0;
|
|
|
bucket_err:
|
|
|
+ smap_release_sock(psock, sock);
|
|
|
raw_spin_unlock_bh(&b->lock);
|
|
|
err:
|
|
|
kfree(e);
|
|
|
- psock = smap_psock_sk(sock);
|
|
|
- if (psock)
|
|
|
- smap_release_sock(psock, sock);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
@@ -2472,10 +2467,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
|
|
|
b = __select_bucket(htab, hash);
|
|
|
head = &b->head;
|
|
|
|
|
|
- raw_spin_lock_bh(&b->lock);
|
|
|
l = lookup_elem_raw(head, hash, key, key_size);
|
|
|
sk = l ? l->sk : NULL;
|
|
|
- raw_spin_unlock_bh(&b->lock);
|
|
|
return sk;
|
|
|
}
|
|
|
|