@@ -1619,6 +1619,7 @@ static void fanout_release_data(struct packet_fanout *f)
 
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
+	struct packet_rollover *rollover = NULL;
 	struct packet_sock *po = pkt_sk(sk);
 	struct packet_fanout *f, *match;
 	u8 type = type_flags & 0xff;
@@ -1641,23 +1642,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		return -EINVAL;
 	}
 
+	mutex_lock(&fanout_mutex);
+
+	err = -EINVAL;
 	if (!po->running)
-		return -EINVAL;
+		goto out;
 
+	err = -EALREADY;
 	if (po->fanout)
-		return -EALREADY;
+		goto out;
 
 	if (type == PACKET_FANOUT_ROLLOVER ||
 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
-		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
-		if (!po->rollover)
-			return -ENOMEM;
-		atomic_long_set(&po->rollover->num, 0);
-		atomic_long_set(&po->rollover->num_huge, 0);
-		atomic_long_set(&po->rollover->num_failed, 0);
+		err = -ENOMEM;
+		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
+		if (!rollover)
+			goto out;
+		atomic_long_set(&rollover->num, 0);
+		atomic_long_set(&rollover->num_huge, 0);
+		atomic_long_set(&rollover->num_failed, 0);
+		po->rollover = rollover;
 	}
 
-	mutex_lock(&fanout_mutex);
 	match = NULL;
 	list_for_each_entry(f, &fanout_list, list) {
 		if (f->id == id &&
@@ -1704,11 +1710,11 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		}
 	}
 out:
-	mutex_unlock(&fanout_mutex);
-	if (err) {
-		kfree(po->rollover);
+	if (err && rollover) {
+		kfree(rollover);
 		po->rollover = NULL;
 	}
+	mutex_unlock(&fanout_mutex);
 	return err;
 }
 
@@ -1717,23 +1723,22 @@ static void fanout_release(struct sock *sk)
 	struct packet_sock *po = pkt_sk(sk);
 	struct packet_fanout *f;
 
-	f = po->fanout;
-	if (!f)
-		return;
-
 	mutex_lock(&fanout_mutex);
-	po->fanout = NULL;
+	f = po->fanout;
+	if (f) {
+		po->fanout = NULL;
+
+		if (atomic_dec_and_test(&f->sk_ref)) {
+			list_del(&f->list);
+			dev_remove_pack(&f->prot_hook);
+			fanout_release_data(f);
+			kfree(f);
+		}
 
-	if (atomic_dec_and_test(&f->sk_ref)) {
-		list_del(&f->list);
-		dev_remove_pack(&f->prot_hook);
-		fanout_release_data(f);
-		kfree(f);
+		if (po->rollover)
+			kfree_rcu(po->rollover, rcu);
 	}
 	mutex_unlock(&fanout_mutex);
-
-	if (po->rollover)
-		kfree_rcu(po->rollover, rcu);
 }
 
 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,