@@ -390,7 +390,7 @@ int dccp_retransmit_skb(struct sock *sk)
 	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
 }
 
-struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
 				   struct request_sock *req)
 {
 	struct dccp_hdr *dh;
@@ -398,13 +398,18 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_response);
-	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
-					   GFP_ATOMIC);
-	if (skb == NULL)
+	struct sk_buff *skb;
+
+	/* sk is marked const to clearly express we dont hold socket lock.
+	 * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
+	 * it is safe to promote sk to non const.
+	 */
+	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
+			   GFP_ATOMIC);
+	if (!skb)
 		return NULL;
 
-	/* Reserve space for headers. */
-	skb_reserve(skb, sk->sk_prot->max_header);
+	skb_reserve(skb, MAX_DCCP_HEADER);
 
 	skb_dst_set(skb, dst_clone(dst));
 
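The idiom worth noting here is the const promotion: `sk` is made const purely to
document that the caller does not hold the socket lock, and the cast back to
non-const at the sock_wmalloc() call site is sound because that helper only
updates sk->sk_wmem_alloc atomically. Below is a minimal userspace sketch of
that idiom, not kernel code; the names mini_sock, charge_wmem and
make_response are hypothetical stand-ins for struct sock, sock_wmalloc() and
dccp_make_response():

#include <stdatomic.h>
#include <stdio.h>

struct mini_sock {
	atomic_int wmem_alloc;	/* only ever modified atomically */
	int state;		/* imagine this field needs the socket lock */
};

/* Non-const parameter, but only the atomic field is touched,
 * so no lock is required to call this. */
static void charge_wmem(struct mini_sock *sk, int size)
{
	atomic_fetch_add(&sk->wmem_alloc, size);
}

/* const documents "no lock held"; casting it away is safe here
 * because charge_wmem() performs only an atomic update. */
static void make_response(const struct mini_sock *sk, int size)
{
	charge_wmem((struct mini_sock *)sk, size);
	/* reading or writing sk->state here would need the lock */
}

int main(void)
{
	struct mini_sock sk = { .wmem_alloc = 0, .state = 0 };

	make_response(&sk, 128);
	printf("wmem_alloc = %d\n", atomic_load(&sk.wmem_alloc));
	return 0;
}

The second hunk also swaps sk->sk_prot->max_header for the DCCP-specific
MAX_DCCP_HEADER constant in both the allocation and the skb_reserve() call,
which keeps the function's reads through `sk` to the minimum the const
annotation promises.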