@@ -85,24 +85,23 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 	struct request_sock *req;
 
 	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
-
-	if (req) {
-		req->rsk_ops = ops;
-		if (attach_listener) {
-			sock_hold(sk_listener);
-			req->rsk_listener = sk_listener;
-		} else {
-			req->rsk_listener = NULL;
+	if (!req)
+		return NULL;
+	req->rsk_listener = NULL;
+	if (attach_listener) {
+		if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+			kmem_cache_free(ops->slab, req);
+			return NULL;
 		}
-		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
-		sk_node_init(&req_to_sk(req)->sk_node);
-		sk_tx_queue_clear(req_to_sk(req));
-		req->saved_syn = NULL;
-		/* Following is temporary. It is coupled with debugging
-		 * helpers in reqsk_put() & reqsk_free()
-		 */
-		atomic_set(&req->rsk_refcnt, 0);
+		req->rsk_listener = sk_listener;
 	}
+	req->rsk_ops = ops;
+	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+	sk_node_init(&req_to_sk(req)->sk_node);
+	sk_tx_queue_clear(req_to_sk(req));
+	req->saved_syn = NULL;
+	atomic_set(&req->rsk_refcnt, 0);
+
 	return req;
 }
 