|
@@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
|
|
|
sk_mem_charge(sk, skb->truesize);
|
|
|
}
|
|
|
|
|
|
-static void netlink_sock_destruct(struct sock *sk)
|
|
|
+static void __netlink_sock_destruct(struct sock *sk)
|
|
|
{
|
|
|
struct netlink_sock *nlk = nlk_sk(sk);
|
|
|
|
|
|
if (nlk->cb_running) {
|
|
|
- if (nlk->cb.done)
|
|
|
- nlk->cb.done(&nlk->cb);
|
|
|
-
|
|
|
module_put(nlk->cb.module);
|
|
|
kfree_skb(nlk->cb.skb);
|
|
|
}
|
|
@@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
|
|
|
WARN_ON(nlk_sk(sk)->groups);
|
|
|
}
|
|
|
|
|
|
/* Deferred second half of netlink socket destruction.
 *
 * Runs from the netlink_sock's embedded work item; scheduled by
 * netlink_sock_destruct() when a dump callback with a ->done() handler
 * is still outstanding.  Invokes the dump's done() hook first, then
 * finishes tearing the socket down.
 *
 * NOTE(review): the deferral to a worker presumably exists because
 * ->done() may require process context while sk_destruct can be invoked
 * from atomic context -- confirm against the sk->sk_destruct call sites.
 */
static void netlink_sock_destruct_work(struct work_struct *work)
{
	/* Recover the owning netlink_sock from its embedded work item. */
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
						work);

	/* Run the pending dump's completion hook before the cb state is
	 * released; __netlink_sock_destruct() drops the cb module ref and
	 * frees the cb skb. */
	nlk->cb.done(&nlk->cb);
	__netlink_sock_destruct(&nlk->sk);
}
|
|
|
+
|
|
|
/* Destructor for netlink sockets (installed as sk->sk_destruct).
 *
 * If a dump callback is still running and has a ->done() handler, defer
 * the teardown to a worker (netlink_sock_destruct_work()) so that
 * done() is invoked before the callback state is freed.  Otherwise the
 * socket is torn down immediately.
 */
static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running && nlk->cb.done) {
		/* Hand off to process context; the worker calls ->done()
		 * and then completes destruction. */
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
		schedule_work(&nlk->work);
		return;
	}

	/* No pending done() hook: destroy inline. */
	__netlink_sock_destruct(sk);
}
|
|
|
+
|
|
|
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
|
|
|
* SMP. Look, when several writers sleep and reader wakes them up, all but one
|
|
|
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
|