@@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group)
 	return group ? 1 << (group - 1) : 0;
 }
 
+static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+					   gfp_t gfp_mask)
+{
+	unsigned int len = skb_end_offset(skb);
+	struct sk_buff *new;
+
+	new = alloc_skb(len, gfp_mask);
+	if (new == NULL)
+		return NULL;
+
+	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
+	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
+	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
+
+	memcpy(skb_put(new, len), skb->data, len);
+	return new;
+}
+
 int netlink_add_tap(struct netlink_tap *nt)
 {
 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
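The new helper deliberately shares nothing with the original skb: skb_end_offset() covers the whole linear data area, alloc_skb() provides fresh kmalloc-backed storage, and only the control-block fields a tap consumer relies on (portid, dst_group, creds) are carried over. The reason, as far as the rest of the patch shows, is that skbs backed by an mmaped ring frame or a vmalloc'ed head do not meet skb_clone()'s assumptions about the data area (notably a properly set up skb_shared_info at skb_end), so taps must receive a full private copy instead.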
@@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
-	nskb = skb_clone(skb, GFP_ATOMIC);
+
+	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
+	else
+		nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
 		nskb->protocol = htons((u16) sk->sk_protocol);
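The check is two-sided on purpose: netlink_skb_is_mmaped() catches skbs whose data sits in a ring frame mapped into user space, while is_vmalloc_addr(skb->head) catches large netlink messages whose linear area was vmalloc'ed rather than kmalloc'ed. Either kind would break skb_clone(), so both are routed through the new full-copy helper; ordinary skbs keep the cheaper clone path.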
@@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk)
 }
 
 #ifdef CONFIG_NETLINK_MMAP
-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
-{
-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
-}
-
 static bool netlink_rx_is_mmaped(struct sock *sk)
 {
 	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
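Note that netlink_skb_is_mmaped() is not simply dropped: __netlink_deliver_tap_skb() above is compiled regardless of CONFIG_NETLINK_MMAP and now calls it, so the static helper (along with its stub #define removed in the next hunk) presumably moves to a header where both configurations can see it.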
@@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 }
 
 #else /* CONFIG_NETLINK_MMAP */
-#define netlink_skb_is_mmaped(skb) false
 #define netlink_rx_is_mmaped(sk) false
 #define netlink_tx_is_mmaped(sk) false
 #define netlink_mmap sock_no_mmap
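For context, the consumers of this tap path are nlmon devices, which forward every netlink message to ordinary packet sockets. Below is a minimal user-space sketch of such a consumer; the interface name nlmon0 is an assumption (create it first with "ip link add nlmon0 type nlmon && ip link set nlmon0 up"), the buffer size is arbitrary, and the program needs CAP_NET_RAW. nlmon frames carry no link-layer header, so each read starts directly at a struct nlmsghdr.

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/netlink.h>
#include <net/if.h>

int main(void)
{
	struct sockaddr_ll sll = { 0 };
	unsigned char buf[8192];
	struct nlmsghdr *nlh;
	ssize_t n;
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex("nlmon0");	/* assumed device name */
	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
		perror("bind");
		return 1;
	}

	for (;;) {
		n = recv(fd, buf, sizeof(buf), 0);
		if (n < 0) {
			perror("recv");
			break;
		}
		if (n < (ssize_t)sizeof(*nlh))
			continue;
		/* No link-layer header on nlmon: the frame starts with
		 * the netlink message the tap copied out of the kernel. */
		nlh = (struct nlmsghdr *)buf;
		printf("nlmsg: len=%u type=%u pid=%u\n",
		       nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_pid);
	}

	close(fd);
	return 0;
}

With the patch applied, messages that travel through the mmaped ring or carry vmalloc'ed heads show up at such a consumer as well, instead of tripping over skb_clone()'s assumptions in the tap path.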