@@ -525,14 +525,14 @@ out:
 	return err;
 }
 
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
 {
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 	struct page *p_start, *p_end;
 
 	/* First page is flushed through netlink_{get,set}_status */
 	p_start = pgvec_to_page(hdr + PAGE_SIZE);
-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
 	while (p_start <= p_end) {
 		flush_dcache_page(p_start);
 		p_start++;
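
This hunk removes the helper's own read of hdr->nm_len. The frame header lives in ring memory that is mapped writable into userspace, so every dereference of hdr->nm_len is a fresh read of data a concurrent writer may have changed; the caller now passes in a length it has already sampled and validated, and the flush covers exactly that many bytes. A minimal userspace sketch of the shape of the fix, using hypothetical names (flush_page(), flush_frame_range(), FRAME_HDRLEN, PAGE_SZ) in place of the kernel's flush_dcache_page()/pgvec_to_page() machinery, and flushing every page of the payload rather than skipping the first as the kernel does:

    #include <stdint.h>

    #define FRAME_HDRLEN	32u
    #define PAGE_SZ		4096u

    static void flush_page(uintptr_t page_addr)
    {
    	(void)page_addr;	/* stand-in for flush_dcache_page() */
    }

    static void flush_frame_range(const void *hdr, unsigned int data_len)
    {
    	/* Page-align the first and last payload byte, then flush every
    	 * page in between.  data_len is the caller's snapshot, so a
    	 * concurrent writer cannot widen the range under us. */
    	uintptr_t first = ((uintptr_t)hdr + FRAME_HDRLEN) &
    			  ~(uintptr_t)(PAGE_SZ - 1);
    	uintptr_t last  = ((uintptr_t)hdr + FRAME_HDRLEN + data_len - 1) &
    			  ~(uintptr_t)(PAGE_SZ - 1);

    	while (first <= last) {
    		flush_page(first);
    		first += PAGE_SZ;
    	}
    }

The point is simply that the helper has no way to be fooled: the shared length field is never consulted after validation.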
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 	struct nl_mmap_hdr *hdr;
 	struct sk_buff *skb;
 	unsigned int maxlen;
-	bool excl = true;
 	int err = 0, len = 0;
 
-	/* Netlink messages are validated by the receiver before processing.
-	 * In order to avoid userspace changing the contents of the message
-	 * after validation, the socket and the ring may only be used by a
-	 * single process, otherwise we fall back to copying.
-	 */
-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
-	    atomic_read(&nlk->mapped) > 1)
-		excl = false;
-
 	mutex_lock(&nlk->pg_vec_lock);
 
 	ring = &nlk->tx_ring;
 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 
 	do {
+		unsigned int nm_len;
+
 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
 		if (hdr == NULL) {
 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 			schedule();
 			continue;
 		}
-		if (hdr->nm_len > maxlen) {
+
+		nm_len = ACCESS_ONCE(hdr->nm_len);
+		if (nm_len > maxlen) {
 			err = -EINVAL;
 			goto out;
 		}
 
-		netlink_frame_flush_dcache(hdr);
+		netlink_frame_flush_dcache(hdr, nm_len);
 
-		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
-			skb = alloc_skb_head(GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			sock_hold(sk);
-			netlink_ring_setup_skb(skb, sk, ring, hdr);
-			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
-			__skb_put(skb, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-			atomic_inc(&ring->pending);
-		} else {
-			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			__skb_put(skb, hdr->nm_len);
-			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+		skb = alloc_skb(nm_len, GFP_KERNEL);
+		if (skb == NULL) {
+			err = -ENOBUFS;
+			goto out;
 		}
+		__skb_put(skb, nm_len);
+		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 
 		netlink_increment_head(ring);
 
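These two hunks are the substance of the fix. The deleted block tried to infer that the socket and ring had a single user (from the file's f_count and nlk->mapped) and, if so, handed the mmap'ed frame to the skb without copying; that heuristic cannot reliably stop userspace from rewriting the frame while sendmsg() is still working on it, so the zero-copy TX path is dropped and the payload is always copied into a freshly allocated skb. Just as important, hdr->nm_len is now sampled exactly once with ACCESS_ONCE(), and that single snapshot feeds the bounds check, the dcache flush, alloc_skb(), __skb_put() and the memcpy(); any later re-read of the shared field would reintroduce the check-versus-use race. A self-contained sketch of the bug class and the fix, with hypothetical consume_racy()/consume_safe() standing in for the kernel code:

    #include <stdio.h>
    #include <string.h>

    #define FRAME_DATA_MAX	64u

    /* The header lives in memory another thread/process may rewrite. */
    struct frame {
    	unsigned int nm_len;
    	char data[256];
    };

    /* Userland stand-in for the kernel's ACCESS_ONCE()/READ_ONCE(). */
    #define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

    /* BAD: nm_len is read twice; the value validated is not necessarily
     * the value handed to memcpy(), so the copy can overrun "out". */
    static int consume_racy(struct frame *f, char *out)
    {
    	if (f->nm_len > FRAME_DATA_MAX)
    		return -1;
    	memcpy(out, f->data, f->nm_len);	/* second, unchecked read */
    	return 0;
    }

    /* GOOD: one snapshot; every later use sees the validated value. */
    static int consume_safe(struct frame *f, char *out)
    {
    	unsigned int nm_len = ACCESS_ONCE(f->nm_len);

    	if (nm_len > FRAME_DATA_MAX)
    		return -1;
    	memcpy(out, f->data, nm_len);
    	return 0;
    }

    int main(void)
    {
    	struct frame f = { .nm_len = 5 };
    	char out[FRAME_DATA_MAX];

    	memcpy(f.data, "hello", 5);
    	printf("racy=%d safe=%d\n",
    	       consume_racy(&f, out), consume_safe(&f, out));
    	return 0;
    }

With a single snapshot, the worst a concurrent writer can do is corrupt its own payload bytes; it can no longer widen the copy past the validated bound.
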
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_frame_flush_dcache(hdr);
+	netlink_frame_flush_dcache(hdr, hdr->nm_len);
 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 
 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
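
The receive-path helper netlink_queue_mmaped_skb() keeps passing hdr->nm_len, and that is fine here: the kernel itself wrote nm_len a few lines earlier and still owns the frame, the value flows only into the dcache flush for data the kernel just wrote (not into any allocation or copy bound), and userspace should not see the frame until netlink_set_status() publishes it as NL_MMAP_STATUS_VALID, which the kernel orders with an explicit memory barrier inside netlink_set_status(). A rough C11 rendering of that publish ordering, with a hypothetical struct ring_hdr and publish_frame(), using a release store where the kernel uses its own barrier primitives:

    #include <stdatomic.h>
    #include <string.h>

    enum { STATUS_UNUSED, STATUS_VALID };

    struct ring_hdr {
    	_Atomic unsigned int nm_status;
    	unsigned int nm_len;
    	char data[256];
    };

    /* Fill the frame completely, then flip the status word last with
     * release semantics.  Caller guarantees len <= sizeof(hdr->data). */
    static void publish_frame(struct ring_hdr *hdr, const void *buf,
    			  unsigned int len)
    {
    	memcpy(hdr->data, buf, len);
    	hdr->nm_len = len;
    	atomic_store_explicit(&hdr->nm_status, STATUS_VALID,
    			      memory_order_release);
    }

A consumer that observes STATUS_VALID with an acquire load is then guaranteed to also see the completed nm_len and payload.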