@@ -1762,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order)
 {
-	struct sk_buff *skb = NULL;
-	unsigned long chunk;
-	gfp_t gfp_mask;
+	struct sk_buff *skb;
 	long timeo;
 	int err;
-	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	struct page *page;
-	int i;
-
-	err = -EMSGSIZE;
-	if (npages > MAX_SKB_FRAGS)
-		goto failure;
 
 	timeo = sock_sndtimeo(sk, noblock);
-	while (!skb) {
+	for (;;) {
 		err = sock_error(sk);
 		if (err != 0)
 			goto failure;
@@ -1785,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			err = -EAGAIN;
-			if (!timeo)
-				goto failure;
-			if (signal_pending(current))
-				goto interrupted;
-			timeo = sock_wait_for_wmem(sk, timeo);
-			continue;
-		}
-
-		err = -ENOBUFS;
-		gfp_mask = sk->sk_allocation;
-		if (gfp_mask & __GFP_WAIT)
-			gfp_mask |= __GFP_REPEAT;
+		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+			break;
 
-		skb = alloc_skb(header_len, gfp_mask);
-		if (!skb)
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		err = -EAGAIN;
+		if (!timeo)
 			goto failure;
-
-		skb->truesize += data_len;
-
-		for (i = 0; npages > 0; i++) {
-			int order = max_page_order;
-
-			while (order) {
-				if (npages >= 1 << order) {
-					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP |
-							   __GFP_NOWARN |
-							   __GFP_NORETRY,
-							   order);
-					if (page)
-						goto fill_page;
-					/* Do not retry other high order allocations */
-					order = 1;
-					max_page_order = 0;
-				}
-				order--;
-			}
-			page = alloc_page(sk->sk_allocation);
-			if (!page)
-				goto failure;
-fill_page:
-			chunk = min_t(unsigned long, data_len,
-				      PAGE_SIZE << order);
-			skb_fill_page_desc(skb, i, page, 0, chunk);
-			data_len -= chunk;
-			npages -= 1 << order;
-		}
+		if (signal_pending(current))
+			goto interrupted;
+		timeo = sock_wait_for_wmem(sk, timeo);
 	}
-
-	skb_set_owner_w(skb, sk);
+	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+				   errcode, sk->sk_allocation);
+	if (skb)
+		skb_set_owner_w(skb, sk);
 	return skb;
 
 interrupted:
 	err = sock_intr_errno(timeo);
 failure:
-	kfree_skb(skb);
 	*errcode = err;
 	return NULL;
 }
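
Note on usage (not part of the patch): sock_alloc_send_pskb() keeps its signature; only its internals change. The function now loops solely to wait for send-buffer space and hands the linear-plus-fragments allocation to alloc_skb_with_frags(), which stores a negative errno through errcode itself when it fails, so the kfree_skb() in the failure path is no longer needed. The sketch below is purely illustrative of a caller, assuming a datagram-style sendmsg() path; the names header_len/payload_len and the PAGE_ALLOC_COSTLY_ORDER cap are example choices, not taken from this diff.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative caller sketch, not part of the patch: allocate an skb with
 * header_len bytes of linear space and payload_len bytes spread over page
 * fragments, blocking unless the caller asked for MSG_DONTWAIT.  On failure
 * NULL is returned and *err holds a negative errno, set either by the
 * send-buffer wait loop above or by alloc_skb_with_frags() via errcode.
 */
static struct sk_buff *example_dgram_alloc(struct sock *sk, struct msghdr *msg,
					   unsigned long header_len,
					   unsigned long payload_len, int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_pskb(sk, header_len, payload_len,
				   msg->msg_flags & MSG_DONTWAIT,
				   err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return NULL;

	/* skb now has header_len bytes of linear headroom/data area and
	 * payload_len bytes attached as page fragments.
	 */
	return skb;
}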