@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	bool alloc;
+	bool alloc = false, alive = false;
 	int id;
 
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_bh(&net->nsid_lock);
-	alloc = refcount_read(&peer->count) == 0 ? false : true;
+	/*
+	 * When peer is obtained from RCU lists, we may race with
+	 * its cleanup. Check whether it's alive, and this guarantees
+	 * we never hash a peer back to net->netns_ids, after it has
+	 * just been idr_remove()'d from there in cleanup_net().
+	 */
+	if (maybe_get_net(peer))
+		alive = alloc = true;
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
+	if (alive)
+		put_net(peer);
 	return id;
 }
 EXPORT_SYMBOL_GPL(peernet2id_alloc);
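
Note for context: the alive check works because maybe_get_net() takes a reference only while the peer's refcount is still non-zero, so a peer that cleanup_net() has already started tearing down is rejected and is never re-hashed into net->netns_ids. A minimal sketch of the pattern, assuming the conventional include/net/net_namespace.h form of the helper (shown here for illustration only, not part of this patch):

	/* Conditional get: succeeds only while the namespace is still alive. */
	static inline struct net *maybe_get_net(struct net *net)
	{
		/* refcount_inc_not_zero() never bumps a zero refcount, so a
		 * net that cleanup_net() is already freeing stays dead.
		 */
		if (!refcount_inc_not_zero(&net->count))
			net = NULL;
		return net;
	}

A reference taken this way must be balanced, which is why the hunk drops it with put_net(peer) once the nsid has been assigned.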