Merge branch 'net-refcount_t'

Elena Reshetova says:

====================
v3 net generic subsystem refcount conversions

Changes in v3:
Rebased on top of the net-next tree.

Changes in v2:
No changes in the patches apart from rebases, but by default
refcount_t now equals atomic_t (*) and uses the standard atomic operations
unless CONFIG_REFCOUNT_FULL is enabled. This is a compromise for
performance-critical subsystems (such as net) that cannot accept even a
slight delay in refcounter operations.

This series replaces atomic_t reference counters in the core network
subsystem components with the new refcount_t type and API (see
include/linux/refcount.h). This prevents intentional or accidental
underflows and overflows that can lead to use-after-free vulnerabilities.
These patches contain only the generic net pieces; other changes will be sent separately.

The patches are fully independent and can be cherry-picked separately.
The big patches, such as the conversions for the sock structure, need a very
detailed look from maintainers: refcount management in them is quite complex,
and while they appear to benefit from the change, extra checking is needed.
The biggest corner-case issue is the fact that refcount_inc() does not
increment from zero.

If there are no objections to the patches, please merge them via the respective trees.

* The respective change is currently merged into -next as
  "locking/refcount: Create unchecked atomic_t implementation".
====================
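
A minimal sketch of the conversion pattern described in the cover letter
(illustrative only, not taken from any specific patch; "struct obj" and its
helpers are hypothetical). refcount_t saturates instead of wrapping on
overflow, and refcount_inc() refuses to increment from zero, so lookup paths
that can race with the final put must use refcount_inc_not_zero():

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {			/* hypothetical refcounted object */
		refcount_t refcnt;
	};

	static void obj_hold(struct obj *o)
	{
		/* with CONFIG_REFCOUNT_FULL: WARNs if the count was already 0 */
		refcount_inc(&o->refcnt);
	}

	static struct obj *obj_lookup(struct obj *o)
	{
		/* racy lookup path: the count may already have dropped to 0 */
		if (o && !refcount_inc_not_zero(&o->refcnt))
			o = NULL;
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refcnt))
			kfree(o);
	}

The refcount API also has no plain subtraction, which is why the net/atm
patches below turn atomic_sub(skb->truesize, &sk->sk_wmem_alloc) into
WARN_ON(refcount_sub_and_test(...)): sk_wmem_alloc is initialized with a +1
bias held for the socket's lifetime, so a subtraction that reaches zero
would indicate a bug, and the WARN_ON flags it.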

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0b58e6db0f
100 changed files with 307 additions and 308 deletions
  1. crypto/algif_aead.c (+1 -1)
  2. drivers/atm/fore200e.c (+1 -11)
  3. drivers/atm/he.c (+1 -1)
  4. drivers/atm/idt77252.c (+2 -2)
  5. drivers/infiniband/hw/nes/nes_cm.c (+2 -2)
  6. drivers/isdn/mISDN/socket.c (+1 -1)
  7. drivers/net/rionet.c (+1 -1)
  8. drivers/s390/net/ctcm_main.c (+13 -13)
  9. drivers/s390/net/netiucv.c (+5 -5)
  10. drivers/s390/net/qeth_core_main.c (+2 -2)
  11. include/linux/atmdev.h (+1 -1)
  12. include/linux/igmp.h (+2 -1)
  13. include/linux/inetdevice.h (+6 -5)
  14. include/linux/netpoll.h (+2 -1)
  15. include/linux/skbuff.h (+10 -10)
  16. include/net/af_unix.h (+2 -1)
  17. include/net/arp.h (+1 -1)
  18. include/net/fib_rules.h (+4 -3)
  19. include/net/inet_frag.h (+2 -2)
  20. include/net/inet_hashtables.h (+2 -2)
  21. include/net/inetpeer.h (+2 -2)
  22. include/net/ndisc.h (+1 -1)
  23. include/net/neighbour.h (+8 -7)
  24. include/net/net_namespace.h (+2 -1)
  25. include/net/netfilter/br_netfilter.h (+1 -1)
  26. include/net/netlabel.h (+4 -4)
  27. include/net/request_sock.h (+5 -4)
  28. include/net/sock.h (+13 -12)
  29. net/atm/br2684.c (+1 -1)
  30. net/atm/clip.c (+4 -4)
  31. net/atm/common.c (+5 -5)
  32. net/atm/lec.c (+2 -2)
  33. net/atm/mpc.c (+2 -2)
  34. net/atm/pppoatm.c (+1 -1)
  35. net/atm/proc.c (+1 -1)
  36. net/atm/raw.c (+1 -1)
  37. net/atm/signaling.c (+1 -1)
  38. net/bluetooth/af_bluetooth.c (+1 -1)
  39. net/bluetooth/rfcomm/sock.c (+1 -1)
  40. net/bridge/br_netfilter_hooks.c (+2 -2)
  41. net/caif/caif_socket.c (+1 -1)
  42. net/core/datagram.c (+3 -3)
  43. net/core/dev.c (+5 -5)
  44. net/core/fib_rules.c (+2 -2)
  45. net/core/neighbour.c (+11 -11)
  46. net/core/net-sysfs.c (+1 -1)
  47. net/core/net_namespace.c (+2 -2)
  48. net/core/netpoll.c (+5 -5)
  49. net/core/pktgen.c (+8 -8)
  50. net/core/rtnetlink.c (+1 -1)
  51. net/core/skbuff.c (+13 -13)
  52. net/core/sock.c (+16 -16)
  53. net/dccp/ipv6.c (+1 -1)
  54. net/decnet/dn_neigh.c (+1 -1)
  55. net/ipv4/af_inet.c (+1 -1)
  56. net/ipv4/cipso_ipv4.c (+2 -2)
  57. net/ipv4/devinet.c (+1 -1)
  58. net/ipv4/esp4.c (+1 -1)
  59. net/ipv4/igmp.c (+5 -5)
  60. net/ipv4/inet_connection_sock.c (+1 -1)
  61. net/ipv4/inet_fragment.c (+7 -7)
  62. net/ipv4/inet_hashtables.c (+2 -2)
  63. net/ipv4/inet_timewait_sock.c (+4 -4)
  64. net/ipv4/inetpeer.c (+9 -9)
  65. net/ipv4/ip_fragment.c (+1 -1)
  66. net/ipv4/ip_output.c (+3 -3)
  67. net/ipv4/ping.c (+2 -2)
  68. net/ipv4/raw.c (+1 -1)
  69. net/ipv4/syncookies.c (+1 -1)
  70. net/ipv4/tcp.c (+2 -2)
  71. net/ipv4/tcp_fastopen.c (+1 -1)
  72. net/ipv4/tcp_ipv4.c (+2 -2)
  73. net/ipv4/tcp_offload.c (+1 -1)
  74. net/ipv4/tcp_output.c (+7 -8)
  75. net/ipv4/udp.c (+3 -3)
  76. net/ipv4/udp_diag.c (+2 -2)
  77. net/ipv6/calipso.c (+2 -2)
  78. net/ipv6/datagram.c (+1 -1)
  79. net/ipv6/esp6.c (+1 -1)
  80. net/ipv6/inet6_hashtables.c (+2 -2)
  81. net/ipv6/ip6_output.c (+2 -2)
  82. net/ipv6/syncookies.c (+1 -1)
  83. net/ipv6/tcp_ipv6.c (+3 -3)
  84. net/ipv6/udp.c (+2 -2)
  85. net/kcm/kcmproc.c (+1 -1)
  86. net/key/af_key.c (+4 -4)
  87. net/l2tp/l2tp_debugfs.c (+1 -2)
  88. net/llc/llc_conn.c (+4 -4)
  89. net/llc/llc_sap.c (+1 -1)
  90. net/netfilter/xt_TPROXY.c (+2 -2)
  91. net/netlink/af_netlink.c (+7 -7)
  92. net/packet/af_packet.c (+7 -7)
  93. net/packet/internal.h (+3 -1)
  94. net/phonet/socket.c (+2 -2)
  95. net/rds/tcp_send.c (+1 -1)
  96. net/rxrpc/af_rxrpc.c (+3 -3)
  97. net/rxrpc/skbuff.c (+6 -6)
  98. net/sched/em_meta.c (+1 -1)
  99. net/sched/sch_atm.c (+1 -1)
  100. net/sctp/output.c (+1 -1)

+ 1 - 1
crypto/algif_aead.c

@@ -877,7 +877,7 @@ static void aead_sock_destruct(struct sock *sk)
 	unsigned int ivlen = crypto_aead_ivsize(
 				crypto_aead_reqtfm(&ctx->aead_req));
 
-	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
+	WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
 	aead_put_sgl(sk);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);

+ 1 - 11
drivers/atm/fore200e.c

@@ -924,12 +924,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
 		else {
 		    dev_kfree_skb_any(entry->skb);
 		}
-#if 1
-		/* race fixed by the above incarnation mechanism, but... */
-		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
-		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
-		}
-#endif
+
 		/* check error condition */
 		if (*entry->status & STATUS_ERROR)
 		    atomic_inc(&vcc->stats->tx_err);
@@ -1130,13 +1125,9 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 	return -ENOMEM;
     }
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     vcc->push(vcc, skb);
     atomic_inc(&vcc->stats->rx);
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     return 0;
 }
 
@@ -1572,7 +1563,6 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
     unsigned long           flags;
 
     ASSERT(vcc);
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
    ASSERT(fore200e);
    ASSERT(fore200e_vcc);
 

+ 1 - 1
drivers/atm/he.c

@@ -2395,7 +2395,7 @@ he_close(struct atm_vcc *vcc)
 		 * TBRQ, the host issues the close command to the adapter.
 		 */
 
-		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+		while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
 		       (retry < MAX_RETRY)) {
 			msleep(sleep);
 			if (sleep < 250)

+ 2 - 2
drivers/atm/idt77252.c

@@ -724,7 +724,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
 		struct sock *sk = sk_atm(vcc);
 
 		vc->estimator->cells += (skb->len + 47) / 48;
-		if (atomic_read(&sk->sk_wmem_alloc) >
+		if (refcount_read(&sk->sk_wmem_alloc) >
 		    (sk->sk_sndbuf >> 1)) {
 			u32 cps = vc->estimator->maxcps;
 
@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
 		atomic_inc(&vcc->stats->tx_err);
 		return -ENOMEM;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 
 	skb_put_data(skb, cell, 52);
 

+ 2 - 2
drivers/infiniband/hw/nes/nes_cm.c

@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 	if (type == NES_TIMER_TYPE_SEND) {
 		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-		atomic_inc(&new_send->skb->users);
+		refcount_inc(&new_send->skb->users);
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		cm_node->send_entry = new_send;
 		add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
 						  flags);
 				break;
 			}
-			atomic_inc(&send_entry->skb->users);
+			refcount_inc(&send_entry->skb->users);
 			cm_packets_retrans++;
 			nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
 				  "for node %p, jiffies = %lu, time to send = "

+ 1 - 1
drivers/isdn/mISDN/socket.c

@@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	copied = skb->len + MISDN_HEADER_LEN;
 	if (len < copied) {
 		if (flags & MSG_PEEK)
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 		else
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -ENOSPC;

+ 1 - 1
drivers/net/rionet.c

@@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				rionet_queue_tx_msg(skb, ndev,
 					nets[rnet->mport->id].active[i]);
 				if (count)
-					atomic_inc(&skb->users);
+					refcount_inc(&skb->users);
 				count++;
 			}
 	} else if (RIONET_MAC_MATCH(eth->h_dest)) {

+ 13 - 13
drivers/s390/net/ctcm_main.c

@@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
 			return -EBUSY;
 		} else {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			header.length = l;
 			header.type = be16_to_cpu(skb->protocol);
 			header.unused = 0;
@@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 	 * Protect skb against beeing free'd by upper
 	 * layers.
 	 */
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	ch->prof.txlen += skb->len;
 	header.length = skb->len + LL_HEADER_LENGTH;
 	header.type = be16_to_cpu(skb->protocol);
@@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 	if (hi) {
 		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!nskb) {
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			skb_pull(skb, LL_HEADER_LENGTH + 2);
 			ctcm_clear_busy(ch->netdev);
 			return -ENOMEM;
 		} else {
 			skb_put_data(nskb, skb->data, skb->len);
-			atomic_inc(&nskb->users);
-			atomic_dec(&skb->users);
+			refcount_inc(&nskb->users);
+			refcount_dec(&skb->users);
 			dev_kfree_skb_irq(skb);
 			skb = nskb;
 		}
@@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			 * Remove our header. It gets added
 			 * again on retransmit.
 			 */
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			skb_pull(skb, LL_HEADER_LENGTH + 2);
 			ctcm_clear_busy(ch->netdev);
 			return -ENOMEM;
@@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 		ch->ccw[1].count = skb->len;
 		skb_copy_from_linear_data(skb,
 				skb_put(ch->trans_skb, skb->len), skb->len);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_irq(skb);
 		ccw_idx = 0;
 	} else {
@@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 
 	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
 		spin_lock_irqsave(&ch->collect_lock, saveflags);
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
 
 		if (!p_header) {
@@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 	 * Protect skb against beeing free'd by upper
 	 * layers.
 	 */
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 
 	/*
 	 * IDAL support in CTCM is broken, so we have to
@@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			goto nomem_exit;
 		} else {
 			skb_put_data(nskb, skb->data, skb->len);
-			atomic_inc(&nskb->users);
-			atomic_dec(&skb->users);
+			refcount_inc(&nskb->users);
+			refcount_dec(&skb->users);
 			dev_kfree_skb_irq(skb);
 			skb = nskb;
 		}
@@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 		ch->trans_skb->len = 0;
 		ch->ccw[1].count = skb->len;
 		skb_put_data(ch->trans_skb, skb->data, skb->len);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_irq(skb);
 		ccw_idx = 0;
 		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@@ -855,7 +855,7 @@ nomem_exit:
 			"%s(%s): MEMORY allocation ERROR\n",
 			CTCM_FUNTAIL, ch->id);
 	rc = -ENOMEM;
-	atomic_dec(&skb->users);
+	refcount_dec(&skb->users);
 	dev_kfree_skb_any(skb);
 	fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
 done:

+ 5 - 5
drivers/s390/net/netiucv.c

@@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 	conn->prof.tx_pending--;
 	if (single_flag) {
 		if ((skb = skb_dequeue(&conn->commit_queue))) {
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			if (privptr) {
 				privptr->stats.tx_packets++;
 				privptr->stats.tx_bytes +=
@@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 		txbytes += skb->len;
 		txpackets++;
 		stat_maxcq++;
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 	}
 	if (conn->collect_len > conn->prof.maxmulti)
@@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(q))) {
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 			IUCV_DBF_TEXT(data, 2,
 				      "EBUSY from netiucv_transmit_skb\n");
 		} else {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			skb_queue_tail(&conn->collect_queue, skb);
 			conn->collect_len += l;
 			rc = 0;
@@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 		} else {
 			if (copied)
 				dev_kfree_skb(skb);
-			atomic_inc(&nskb->users);
+			refcount_inc(&nskb->users);
 			skb_queue_tail(&conn->commit_queue, nskb);
 		}
 	}

+ 2 - 2
drivers/s390/net/qeth_core_main.c

@@ -1242,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
 			}
 		}
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
 	}
@@ -3975,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 	int flush_cnt = 0, hdr_len, large_send = 0;
 
 	buffer = buf->buffer;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	skb_queue_tail(&buf->skb_list, skb);
 
 	/*check first on TSO ....*/

+ 1 - 1
include/linux/atmdev.h

@@ -254,7 +254,7 @@ static inline void atm_return(struct atm_vcc *vcc,int truesize)
 
 static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
 {
-	return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+	return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
 	       sk_atm(vcc)->sk_sndbuf;
 }
 

+ 2 - 1
include/linux/igmp.h

@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/timer.h>
 #include <linux/in.h>
+#include <linux/refcount.h>
 #include <uapi/linux/igmp.h>
 
 static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@@ -84,7 +85,7 @@ struct ip_mc_list {
 	struct ip_mc_list __rcu *next_hash;
 	struct timer_list	timer;
 	int			users;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	spinlock_t		lock;
 	char			tm_running;
 	char			reporter;

+ 6 - 5
include/linux/inetdevice.h

@@ -11,6 +11,7 @@
 #include <linux/timer.h>
 #include <linux/sysctl.h>
 #include <linux/rtnetlink.h>
+#include <linux/refcount.h>
 
 struct ipv4_devconf {
 	void	*sysctl;
@@ -22,7 +23,7 @@ struct ipv4_devconf {
 
 struct in_device {
 	struct net_device	*dev;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	int			dead;
 	struct in_ifaddr	*ifa_list;	/* IP ifaddr chain		*/
 
@@ -219,7 +220,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev)
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(dev);
 	if (in_dev)
-		atomic_inc(&in_dev->refcnt);
+		refcount_inc(&in_dev->refcnt);
 	rcu_read_unlock();
 	return in_dev;
 }
@@ -240,12 +241,12 @@ void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
-	if (atomic_dec_and_test(&idev->refcnt))
+	if (refcount_dec_and_test(&idev->refcnt))
 		in_dev_finish_destroy(idev);
 }
 
-#define __in_dev_put(idev)  atomic_dec(&(idev)->refcnt)
-#define in_dev_hold(idev)   atomic_inc(&(idev)->refcnt)
+#define __in_dev_put(idev)  refcount_dec(&(idev)->refcnt)
+#define in_dev_hold(idev)   refcount_inc(&(idev)->refcnt)
 
 #endif /* __KERNEL__ */
 

+ 2 - 1
include/linux/netpoll.h

@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/refcount.h>
 
 union inet_addr {
 	__u32		all[4];
@@ -34,7 +35,7 @@ struct netpoll {
 };
 
 struct netpoll_info {
-	atomic_t refcnt;
+	refcount_t refcnt;
 
 	struct semaphore dev_lock;
 

+ 10 - 10
include/linux/skbuff.h

@@ -252,7 +252,7 @@ struct nf_conntrack {
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
-	atomic_t		use;
+	refcount_t		use;
 	enum {
 		BRNF_PROTO_UNCHANGED,
 		BRNF_PROTO_8021Q,
@@ -761,7 +761,7 @@ struct sk_buff {
 	unsigned char		*head,
 				*data;
 	unsigned int		truesize;
-	atomic_t		users;
+	refcount_t		users;
 };
 
 #ifdef __KERNEL__
@@ -872,9 +872,9 @@ static inline bool skb_unref(struct sk_buff *skb)
 {
 	if (unlikely(!skb))
 		return false;
-	if (likely(atomic_read(&skb->users) == 1))
+	if (likely(refcount_read(&skb->users) == 1))
 		smp_rmb();
-	else if (likely(!atomic_dec_and_test(&skb->users)))
+	else if (likely(!refcount_dec_and_test(&skb->users)))
 		return false;
 
 	return true;
@@ -915,7 +915,7 @@ struct sk_buff_fclones {
 
 	struct sk_buff	skb2;
 
-	atomic_t	fclone_ref;
+	refcount_t	fclone_ref;
 };
 
 /**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
 	fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
 	return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
 	       fclones->skb2.sk == sk;
 }
 
@@ -1283,7 +1283,7 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
  */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return skb;
 }
 
@@ -1384,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb)
 */
 static inline int skb_shared(const struct sk_buff *skb)
 {
-	return atomic_read(&skb->users) != 1;
+	return refcount_read(&skb->users) != 1;
 }
 
 /**
@@ -3589,13 +3589,13 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
 {
-	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
+	if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
 		kfree(nf_bridge);
 }
 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 {
 	if (nf_bridge)
-		atomic_inc(&nf_bridge->use);
+		refcount_inc(&nf_bridge->use);
 }
 #endif /* CONFIG_BRIDGE_NETFILTER */
 static inline void nf_reset(struct sk_buff *skb)

+ 2 - 1
include/net/af_unix.h

@@ -4,6 +4,7 @@
 #include <linux/socket.h>
 #include <linux/un.h>
 #include <linux/mutex.h>
+#include <linux/refcount.h>
 #include <net/sock.h>
 
 void unix_inflight(struct user_struct *user, struct file *fp);
@@ -21,7 +22,7 @@ extern spinlock_t unix_table_lock;
 extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 
 struct unix_address {
-	atomic_t	refcnt;
+	refcount_t	refcnt;
 	int		len;
 	unsigned int	hash;
 	struct sockaddr_un name[0];

+ 1 - 1
include/net/arp.h

@@ -28,7 +28,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
 
 	rcu_read_lock_bh();
 	n = __ipv4_neigh_lookup_noref(dev, key);
-	if (n && !atomic_inc_not_zero(&n->refcnt))
+	if (n && !refcount_inc_not_zero(&n->refcnt))
 		n = NULL;
 	rcu_read_unlock_bh();
 

+ 4 - 3
include/net/fib_rules.h

@@ -5,6 +5,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/fib_rules.h>
+#include <linux/refcount.h>
 #include <net/flow.h>
 #include <net/rtnetlink.h>
 
@@ -29,7 +30,7 @@ struct fib_rule {
 	struct fib_rule __rcu	*ctarget;
 	struct net		*fr_net;
 
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	u32			pref;
 	int			suppress_ifgroup;
 	int			suppress_prefixlen;
@@ -103,12 +104,12 @@ struct fib_rules_ops {
 
 static inline void fib_rule_get(struct fib_rule *rule)
 {
-	atomic_inc(&rule->refcnt);
+	refcount_inc(&rule->refcnt);
 }
 
 static inline void fib_rule_put(struct fib_rule *rule)
 {
-	if (atomic_dec_and_test(&rule->refcnt))
+	if (refcount_dec_and_test(&rule->refcnt))
 		kfree_rcu(rule, rcu);
 }
 

+ 2 - 2
include/net/inet_frag.h

@@ -50,7 +50,7 @@ struct inet_frag_queue {
 	spinlock_t		lock;
 	struct timer_list	timer;
 	struct hlist_node	list;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	struct sk_buff		*fragments;
 	struct sk_buff		*fragments_tail;
 	ktime_t			stamp;
@@ -129,7 +129,7 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
 
 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
 {
-	if (atomic_dec_and_test(&q->refcnt))
+	if (refcount_dec_and_test(&q->refcnt))
 		inet_frag_destroy(q, f);
 }
 

+ 2 - 2
include/net/inet_hashtables.h

@@ -32,7 +32,7 @@
 #include <net/tcp_states.h>
 #include <net/netns/hash.h>
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
@@ -334,7 +334,7 @@ static inline struct sock *inet_lookup(struct net *net,
 	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
 			   dport, dif, &refcounted);
 
-	if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 	return sk;
 }

+ 2 - 2
include/net/inetpeer.h

@@ -46,7 +46,7 @@ struct inet_peer {
 		struct rcu_head     gc_rcu;
 	};
 	/*
-	 * Once inet_peer is queued for deletion (refcnt == -1), following field
+	 * Once inet_peer is queued for deletion (refcnt == 0), following field
 	 * is not available: rid
 	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
@@ -60,7 +60,7 @@ struct inet_peer {
 
 	/* following fields might be frequently dirtied */
 	__u32			dtime;	/* the time of last use of not referenced entries */
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 };
 
 struct inet_peer_base {

+ 1 - 1
include/net/ndisc.h

@@ -384,7 +384,7 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
 
 	rcu_read_lock_bh();
 	n = __ipv6_neigh_lookup_noref(dev, pkey);
-	if (n && !atomic_inc_not_zero(&n->refcnt))
+	if (n && !refcount_inc_not_zero(&n->refcnt))
 		n = NULL;
 	rcu_read_unlock_bh();
 

+ 8 - 7
include/net/neighbour.h

@@ -17,6 +17,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/rcupdate.h>
@@ -76,7 +77,7 @@ struct neigh_parms {
 	void	*sysctl_table;
 
 	int dead;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct rcu_head rcu_head;
 
 	int	reachable_time;
@@ -137,7 +138,7 @@ struct neighbour {
 	unsigned long		confirmed;
 	unsigned long		updated;
 	rwlock_t		lock;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	struct sk_buff_head	arp_queue;
 	unsigned int		arp_queue_len_bytes;
 	struct timer_list	timer;
@@ -395,12 +396,12 @@ void neigh_sysctl_unregister(struct neigh_parms *p);
 
 static inline void __neigh_parms_put(struct neigh_parms *parms)
 {
-	atomic_dec(&parms->refcnt);
+	refcount_dec(&parms->refcnt);
 }
 
 static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
 {
-	atomic_inc(&parms->refcnt);
+	refcount_inc(&parms->refcnt);
 	return parms;
 }
 
@@ -410,18 +411,18 @@ static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
 
 static inline void neigh_release(struct neighbour *neigh)
 {
-	if (atomic_dec_and_test(&neigh->refcnt))
+	if (refcount_dec_and_test(&neigh->refcnt))
 		neigh_destroy(neigh);
 }
 
 static inline struct neighbour * neigh_clone(struct neighbour *neigh)
 {
 	if (neigh)
-		atomic_inc(&neigh->refcnt);
+		refcount_inc(&neigh->refcnt);
 	return neigh;
 }
 
-#define neigh_hold(n)	atomic_inc(&(n)->refcnt)
+#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
 
 static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {

+ 2 - 1
include/net/net_namespace.h

@@ -5,6 +5,7 @@
 #define __NET_NET_NAMESPACE_H
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <linux/sysctl.h>
@@ -46,7 +47,7 @@ struct netns_ipvs;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
-	atomic_t		passive;	/* To decided when the network
+	refcount_t		passive;	/* To decided when the network
 						 * namespace should be freed.
 						 */
 	atomic_t		count;		/* To decided when the network

+ 1 - 1
include/net/netfilter/br_netfilter.h

@@ -8,7 +8,7 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
 	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
 
 	if (likely(skb->nf_bridge))
-		atomic_set(&(skb->nf_bridge->use), 1);
+		refcount_set(&(skb->nf_bridge->use), 1);
 
 	return skb->nf_bridge;
 }

+ 4 - 4
include/net/netlabel.h

@@ -37,7 +37,7 @@
 #include <linux/in6.h>
 #include <net/netlink.h>
 #include <net/request_sock.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 
 struct cipso_v4_doi;
 struct calipso_doi;
@@ -136,7 +136,7 @@ struct netlbl_audit {
  *
 */
 struct netlbl_lsm_cache {
-	atomic_t refcount;
+	refcount_t refcount;
 	void (*free) (const void *data);
 	void *data;
 };
@@ -295,7 +295,7 @@ static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
 
 	cache = kzalloc(sizeof(*cache), flags);
 	if (cache)
-		atomic_set(&cache->refcount, 1);
+		refcount_set(&cache->refcount, 1);
 	return cache;
 }
 
@@ -309,7 +309,7 @@ static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
 */
 static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
 {
-	if (!atomic_dec_and_test(&cache->refcount))
+	if (!refcount_dec_and_test(&cache->refcount))
 		return;
 
 	if (cache->free)

+ 5 - 4
include/net/request_sock.h

@@ -19,6 +19,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/refcount.h>
 
 #include <net/sock.h>
 
@@ -89,7 +90,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 		return NULL;
 	req->rsk_listener = NULL;
 	if (attach_listener) {
-		if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
 			kmem_cache_free(ops->slab, req);
 			return NULL;
 		}
@@ -100,7 +101,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 	sk_node_init(&req_to_sk(req)->sk_node);
 	sk_tx_queue_clear(req_to_sk(req));
 	req->saved_syn = NULL;
-	atomic_set(&req->rsk_refcnt, 0);
+	refcount_set(&req->rsk_refcnt, 0);
 
 	return req;
 }
@@ -108,7 +109,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 static inline void reqsk_free(struct request_sock *req)
 {
 	/* temporary debugging */
-	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
 
 	req->rsk_ops->destructor(req);
 	if (req->rsk_listener)
@@ -119,7 +120,7 @@ static inline void reqsk_free(struct request_sock *req)
 
 static inline void reqsk_put(struct request_sock *req)
 {
-	if (atomic_dec_and_test(&req->rsk_refcnt))
+	if (refcount_dec_and_test(&req->rsk_refcnt))
 		reqsk_free(req);
 }
 

+ 13 - 12
include/net/sock.h

@@ -66,6 +66,7 @@
 #include <linux/poll.h>
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 #include <net/tcp_states.h>
@@ -219,7 +220,7 @@ struct sock_common {
 		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
 	};
 
-	atomic_t		skc_refcnt;
+	refcount_t		skc_refcnt;
 	/* private: */
 	int                     skc_dontcopy_end[0];
 	union {
@@ -390,7 +391,7 @@ struct sock {
 
 	/* ===== cache line for TX ===== */
 	int			sk_wmem_queued;
-	atomic_t		sk_wmem_alloc;
+	refcount_t		sk_wmem_alloc;
 	unsigned long		sk_tsq_flags;
 	struct sk_buff		*sk_send_head;
 	struct sk_buff_head	sk_write_queue;
@@ -611,7 +612,7 @@ static inline bool __sk_del_node_init(struct sock *sk)
 
 static __always_inline void sock_hold(struct sock *sk)
 {
-	atomic_inc(&sk->sk_refcnt);
+	refcount_inc(&sk->sk_refcnt);
 }
 
 /* Ungrab socket in the context, which assumes that socket refcnt
@@ -619,7 +620,7 @@ static __always_inline void sock_hold(struct sock *sk)
 */
 static __always_inline void __sock_put(struct sock *sk)
 {
-	atomic_dec(&sk->sk_refcnt);
+	refcount_dec(&sk->sk_refcnt);
 }
 
 static inline bool sk_del_node_init(struct sock *sk)
@@ -628,7 +629,7 @@ static inline bool sk_del_node_init(struct sock *sk)
 
 	if (rc) {
 		/* paranoid for a while -acme */
-		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
 		__sock_put(sk);
 	}
 	return rc;
@@ -650,7 +651,7 @@ static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
 
 	if (rc) {
 		/* paranoid for a while -acme */
-		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
 		__sock_put(sk);
 	}
 	return rc;
@@ -1144,9 +1145,9 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
 
 static inline void sk_refcnt_debug_release(const struct sock *sk)
 {
-	if (atomic_read(&sk->sk_refcnt) != 1)
+	if (refcount_read(&sk->sk_refcnt) != 1)
 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
-		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
+		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
 }
 #else /* SOCK_REFCNT_DEBUG */
 #define sk_refcnt_debug_inc(sk) do { } while (0)
@@ -1636,7 +1637,7 @@ void sock_init_data(struct socket *sock, struct sock *sk);
 /* Ungrab socket and destroy it, if it was the last reference. */
 static inline void sock_put(struct sock *sk)
 {
-	if (atomic_dec_and_test(&sk->sk_refcnt))
+	if (refcount_dec_and_test(&sk->sk_refcnt))
 		sk_free(sk);
 }
 /* Generic version of sock_put(), dealing with all sockets
@@ -1911,7 +1912,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
 */
 static inline int sk_wmem_alloc_get(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) - 1;
+	return refcount_read(&sk->sk_wmem_alloc) - 1;
 }
 
 /**
@@ -2055,7 +2056,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
 	int amt = 0;
 
 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
 		if (amt < 0)
 			amt = 0;
 	}
@@ -2136,7 +2137,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 */
 static inline bool sock_writeable(const struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+	return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
 
 static inline gfp_t gfp_any(void)

+ 1 - 1
net/atm/br2684.c

@@ -252,7 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
 	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-	atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;

+ 4 - 4
net/atm/clip.c

@@ -137,11 +137,11 @@ static int neigh_check_cb(struct neighbour *n)
 	if (entry->vccs || time_before(jiffies, entry->expires))
 		return 0;
 
-	if (atomic_read(&n->refcnt) > 1) {
+	if (refcount_read(&n->refcnt) > 1) {
 		struct sk_buff *skb;
 
 		pr_debug("destruction postponed with ref %d\n",
-			 atomic_read(&n->refcnt));
+			 refcount_read(&n->refcnt));
 
 		while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
 			dev_kfree_skb(skb);
@@ -381,7 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
 		memcpy(here, llc_oui, sizeof(llc_oui));
 		((__be16 *) here)[3] = skb->protocol;
 	}
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
 	entry->vccs->last_use = jiffies;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
@@ -767,7 +767,7 @@ static void atmarp_info(struct seq_file *seq, struct neighbour *n,
 			seq_printf(seq, "(resolving)\n");
 		else
 			seq_printf(seq, "(expired, ref %d)\n",
-				   atomic_read(&entry->neigh->refcnt));
+				   refcount_read(&entry->neigh->refcnt));
 	} else if (!svc) {
 		seq_printf(seq, "%d.%d.%d\n",
 			   clip_vcc->vcc->dev->number,

+ 5 - 5
net/atm/common.c

@@ -80,9 +80,9 @@ static void vcc_sock_destruct(struct sock *sk)
 		printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
 		       __func__, atomic_read(&sk->sk_rmem_alloc));
 
-	if (atomic_read(&sk->sk_wmem_alloc))
+	if (refcount_read(&sk->sk_wmem_alloc))
 		printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
-		       __func__, atomic_read(&sk->sk_wmem_alloc));
+		       __func__, refcount_read(&sk->sk_wmem_alloc));
 }
 
 static void vcc_def_wakeup(struct sock *sk)
@@ -101,7 +101,7 @@ static inline int vcc_writable(struct sock *sk)
 	struct atm_vcc *vcc = atm_sk(sk);
 
 	return (vcc->qos.txtp.max_sdu +
-		atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
+		refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
 }
 
 static void vcc_write_space(struct sock *sk)
@@ -156,7 +156,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family, i
 	memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
 	memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
 	vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-	atomic_set(&sk->sk_wmem_alloc, 1);
+	refcount_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_rmem_alloc, 0);
 	vcc->push = NULL;
 	vcc->pop = NULL;
@@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 		goto out;
 	}
 	pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;

+ 2 - 2
net/atm/lec.c

@@ -181,7 +181,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	ATM_SKB(skb)->vcc = vcc;
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
 
-	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	if (vcc->send(vcc, skb) < 0) {
 		dev->stats.tx_dropped++;
 		return;
@@ -345,7 +345,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	int i;
 	char *tmp;		/* FIXME */
 
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	mesg = (struct atmlec_msg *)skb->data;
 	tmp = skb->data;
 	tmp += sizeof(struct atmlec_msg);

+ 2 - 2
net/atm/mpc.c

@@ -555,7 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
 					sizeof(struct llc_snap_hdr));
 	}
 
-	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
 	entry->shortcut->send(entry->shortcut, skb);
 	entry->packets_fwded++;
@@ -911,7 +911,7 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
 
 	struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
 	struct k_message *mesg = (struct k_message *)skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 
 	if (mpc == NULL) {
 		pr_info("no mpc found\n");

+ 1 - 1
net/atm/pppoatm.c

@@ -350,7 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
 		return 1;
 	}
 
-	atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
 		 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);

+ 1 - 1
net/atm/proc.c

@@ -211,7 +211,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
 		   vcc->flags, sk->sk_err,
 		   sk_wmem_alloc_get(sk), sk->sk_sndbuf,
 		   sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
-		   atomic_read(&sk->sk_refcnt));
+		   refcount_read(&sk->sk_refcnt));
 }
 
 static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)

+ 1 - 1
net/atm/raw.c

@@ -35,7 +35,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
 
 	pr_debug("(%d) %d -= %d\n",
 		 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 	dev_kfree_skb_any(skb);
 	sk->sk_write_space(sk);
 }

+ 1 - 1
net/atm/signaling.c

@@ -67,7 +67,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	struct sock *sk;
 
 	msg = (struct atmsvc_msg *) skb->data;
-	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 	vcc = *(struct atm_vcc **) &msg->vcc;
 	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
 	sk = sk_atm(vcc);

+ 1 - 1
net/bluetooth/af_bluetooth.c

@@ -657,7 +657,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq,
 			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
 			   sk,
-			   atomic_read(&sk->sk_refcnt),
+			   refcount_read(&sk->sk_refcnt),
 			   sk_rmem_alloc_get(sk),
 			   sk_wmem_alloc_get(sk),
 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),

+ 1 - 1
net/bluetooth/rfcomm/sock.c

@@ -197,7 +197,7 @@ static void rfcomm_sock_kill(struct sock *sk)
 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
 		return;
 
-	BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+	BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
 	/* Kill poor orphan */
 	bt_sock_unlink(&rfcomm_sk_list, sk);

+ 2 - 2
net/bridge/br_netfilter_hooks.c

@@ -149,12 +149,12 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
 
-	if (atomic_read(&nf_bridge->use) > 1) {
+	if (refcount_read(&nf_bridge->use) > 1) {
 		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
 
 		if (tmp) {
 			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
-			atomic_set(&tmp->use, 1);
+			refcount_set(&tmp->use, 1);
 		}
 		nf_bridge_put(nf_bridge);
 		nf_bridge = tmp;

+ 1 - 1
net/caif/caif_socket.c

@@ -1013,7 +1013,7 @@ static const struct proto_ops caif_stream_ops = {
 static void caif_sock_destructor(struct sock *sk)
 {
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+	caif_assert(!refcount_read(&sk->sk_wmem_alloc));
 	caif_assert(sk_unhashed(sk));
 	caif_assert(!sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {

+ 3 - 3
net/core/datagram.c

@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 				}
 			}
 			*peeked = 1;
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 		} else {
 			__skb_unlink(skb, queue);
 			if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 		spin_lock_bh(&sk_queue->lock);
 		if (skb == skb_peek(sk_queue)) {
 			__skb_unlink(skb, sk_queue);
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			if (destructor)
 				destructor(sk, skb);
 			err = 0;
@@ -614,7 +614,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
 		skb->data_len += copied;
 		skb->len += copied;
 		skb->truesize += truesize;
-		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
 		while (copied) {
 			int size = min_t(int, copied, PAGE_SIZE - start);
 			skb_fill_page_desc(skb, frag++, pages[n], start, size);

+ 5 - 5
net/core/dev.c

@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		return -ENOMEM;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 	if (unlikely(!skb))
 		return;
 
-	if (likely(atomic_read(&skb->users) == 1)) {
+	if (likely(refcount_read(&skb->users) == 1)) {
 		smp_rmb();
-		atomic_set(&skb->users, 0);
-	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		refcount_set(&skb->users, 0);
+	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
 	get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 			clist = clist->next;
 
-			WARN_ON(atomic_read(&skb->users));
+			WARN_ON(refcount_read(&skb->users));
 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 				trace_consume_skb(skb);
 			else

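The __dev_kfree_skb_irq() hunk keeps its lock-free fast path: when a count of one proves no other holder can appear, a plain refcount_set(, 0) avoids the atomic RMW of a decrement. A hedged sketch of that shape (hypothetical obj_put_irq; the smp_rmb() mirrors the one retained above):

static bool obj_put_irq(refcount_t *r)
{
	if (refcount_read(r) == 1) {
		/* provably the last holder: no atomic RMW needed */
		smp_rmb();
		refcount_set(r, 0);
		return true;		/* caller may free */
	}
	return refcount_dec_and_test(r);	/* contended slow path */
}
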
+ 2 - 2
net/core/fib_rules.c

@@ -46,7 +46,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
 	if (r == NULL)
 		return -ENOMEM;
 
-	atomic_set(&r->refcnt, 1);
+	refcount_set(&r->refcnt, 1);
 	r->action = FR_ACT_TO_TBL;
 	r->pref = pref;
 	r->table = table;
@@ -283,7 +283,7 @@ jumped:
 
 		if (err != -EAGAIN) {
 			if ((arg->flags & FIB_LOOKUP_NOREF) ||
-			    likely(atomic_inc_not_zero(&rule->refcnt))) {
+			    likely(refcount_inc_not_zero(&rule->refcnt))) {
 				arg->rule = rule;
 				goto out;
 			}

+ 11 - 11
net/core/neighbour.c

@@ -124,7 +124,7 @@ static bool neigh_del(struct neighbour *n, __u8 state,
 	bool retval = false;
 
 	write_lock(&n->lock);
-	if (atomic_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
+	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
 		struct neighbour *neigh;
 
 		neigh = rcu_dereference_protected(n->next,
@@ -254,7 +254,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 			neigh_del_timer(n);
 			n->dead = 1;
 
-			if (atomic_read(&n->refcnt) != 1) {
+			if (refcount_read(&n->refcnt) != 1) {
 				/* The most unpleasant situation.
 				   We must destroy neighbour entry,
 				   but someone still uses it.
@@ -335,7 +335,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
 
 	NEIGH_CACHE_STAT_INC(tbl, allocs);
 	n->tbl		  = tbl;
-	atomic_set(&n->refcnt, 1);
+	refcount_set(&n->refcnt, 1);
 	n->dead		  = 1;
 out:
 	return n;
@@ -444,7 +444,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
 	rcu_read_lock_bh();
 	n = __neigh_lookup_noref(tbl, pkey, dev);
 	if (n) {
-		if (!atomic_inc_not_zero(&n->refcnt))
+		if (!refcount_inc_not_zero(&n->refcnt))
 			n = NULL;
 		NEIGH_CACHE_STAT_INC(tbl, hits);
 	}
@@ -473,7 +473,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 	     n = rcu_dereference_bh(n->next)) {
 		if (!memcmp(n->primary_key, pkey, key_len) &&
 		    net_eq(dev_net(n->dev), net)) {
-			if (!atomic_inc_not_zero(&n->refcnt))
+			if (!refcount_inc_not_zero(&n->refcnt))
 				n = NULL;
 			NEIGH_CACHE_STAT_INC(tbl, hits);
 			break;
@@ -709,7 +709,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms);
 
 static inline void neigh_parms_put(struct neigh_parms *parms)
 {
-	if (atomic_dec_and_test(&parms->refcnt))
+	if (refcount_dec_and_test(&parms->refcnt))
 		neigh_parms_destroy(parms);
 }
 
@@ -821,7 +821,7 @@ static void neigh_periodic_work(struct work_struct *work)
 			if (time_before(n->used, n->confirmed))
 				n->used = n->confirmed;
 
-			if (atomic_read(&n->refcnt) == 1 &&
+			if (refcount_read(&n->refcnt) == 1 &&
 			    (state == NUD_FAILED ||
 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
 				*np = n->next;
@@ -1479,7 +1479,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
 	if (p) {
 		p->tbl		  = tbl;
-		atomic_set(&p->refcnt, 1);
+		refcount_set(&p->refcnt, 1);
 		p->reachable_time =
 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
 		dev_hold(dev);
@@ -1542,7 +1542,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
 	INIT_LIST_HEAD(&tbl->parms_list);
 	list_add(&tbl->parms.list, &tbl->parms_list);
 	write_pnet(&tbl->parms.net, &init_net);
-	atomic_set(&tbl->parms.refcnt, 1);
+	refcount_set(&tbl->parms.refcnt, 1);
 	tbl->parms.reachable_time =
 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
 
@@ -1796,7 +1796,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
 
 	if ((parms->dev &&
 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
-	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
+	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
 	    /* approximative value for deprecated QUEUE_LEN (in packets) */
@@ -2234,7 +2234,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
-	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
+	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
 
 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||

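The neighbour lookups above all follow the same RCU idiom: an entry found under rcu_read_lock() may be concurrently on its way to being freed, so a plain refcount_inc() could increment from zero; refcount_inc_not_zero() fails instead and the lookup reports a miss. A sketch under hypothetical names (struct entry, entry_get_rcu), not the kernel's:

struct entry {
	refcount_t refcnt;
	struct rcu_head rcu;
};

static struct entry *entry_get_rcu(struct entry __rcu **slot)
{
	struct entry *e;

	rcu_read_lock();
	e = rcu_dereference(*slot);
	if (e && !refcount_inc_not_zero(&e->refcnt))
		e = NULL;		/* raced with the final put */
	rcu_read_unlock();
	return e;			/* caller owns a reference, or NULL */
}
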
+ 1 - 1
net/core/net-sysfs.c

@@ -1448,7 +1448,7 @@ static void *net_grab_current_ns(void)
 	struct net *ns = current->nsproxy->net_ns;
 #ifdef CONFIG_NET_NS
 	if (ns)
-		atomic_inc(&ns->passive);
+		refcount_inc(&ns->passive);
 #endif
 	return ns;
 }

+ 2 - 2
net/core/net_namespace.c

@@ -284,7 +284,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 	LIST_HEAD(net_exit_list);
 
 	atomic_set(&net->count, 1);
-	atomic_set(&net->passive, 1);
+	refcount_set(&net->passive, 1);
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
 	idr_init(&net->netns_ids);
@@ -380,7 +380,7 @@ static void net_free(struct net *net)
 void net_drop_ns(void *p)
 {
 	struct net *ns = p;
-	if (ns && atomic_dec_and_test(&ns->passive))
+	if (ns && refcount_dec_and_test(&ns->passive))
 		net_free(ns);
 }
 

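The net->passive conversion is the plain get/put shape most of this series reduces to. A sketch with hypothetical names (my_obj), not the kernel code itself; the underflow/overflow checks fire when CONFIG_REFCOUNT_FULL is enabled:

struct my_obj {
	refcount_t ref;
};

static inline void my_obj_get(struct my_obj *o)
{
	refcount_inc(&o->ref);	/* WARNs on increment-from-zero (REFCOUNT_FULL) */
}

static inline void my_obj_put(struct my_obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);	/* last reference gone */
}
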
+ 5 - 5
net/core/netpoll.c

@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
 			if (!skb_irq_freeable(skb)) {
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 				dev_kfree_skb_any(skb); /* put this one back */
 			} else {
 				__kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
 		return NULL;
 	}
 
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb_reserve(skb, reserve);
 	return skb;
 }
@@ -632,7 +632,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 		skb_queue_head_init(&npinfo->txq);
 		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
-		atomic_set(&npinfo->refcnt, 1);
+		refcount_set(&npinfo->refcnt, 1);
 
 		ops = np->dev->netdev_ops;
 		if (ops->ndo_netpoll_setup) {
@@ -642,7 +642,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 		}
 	} else {
 		npinfo = rtnl_dereference(ndev->npinfo);
-		atomic_inc(&npinfo->refcnt);
+		refcount_inc(&npinfo->refcnt);
 	}
 
 	npinfo->netpoll = np;
@@ -821,7 +821,7 @@ void __netpoll_cleanup(struct netpoll *np)
 
 	synchronize_srcu(&netpoll_srcu);
 
-	if (atomic_dec_and_test(&npinfo->refcnt)) {
+	if (refcount_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
 		ops = np->dev->netdev_ops;

+ 8 - 8
net/core/pktgen.c

@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 

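Note why the pktgen atomic_sub() calls become WARN_ON(refcount_sub_and_test(...)) above: the refcount API deliberately offers no plain refcount_sub(). The closest primitive is refcount_sub_and_test(), and since pktgen still holds a reference of its own, the subtraction must never reach zero; wrapping it in WARN_ON() both documents and checks that invariant. Hedged sketch, hypothetical name:

static void drop_extra_refs(refcount_t *r, unsigned int extra)
{
	/* caller keeps one reference, so this must not hit zero */
	WARN_ON(refcount_sub_and_test(extra, r));
}
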
+ 1 - 1
net/core/rtnetlink.c

@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 
 	NETLINK_CB(skb).dst_group = group;
 	if (echo)
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 	if (echo)
 		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);

+ 13 - 13
net/core/skbuff.c

@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->head = NULL;
 	skb->truesize = sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	skb->pfmemalloc = pfmemalloc;
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;
 
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head_frag);
 	C(data);
 	C(truesize);
-	atomic_set(&n->users, 1);
+	refcount_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		return NULL;
 
 	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
 		n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
@@ -3024,7 +3024,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		get_page(pfrag->page);
 
 		skb->truesize += copy;
-		atomic_add(copy, &sk->sk_wmem_alloc);
+		refcount_add(copy, &sk->sk_wmem_alloc);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
@@ -3844,7 +3844,7 @@ struct sk_buff *skb_clone_sk(struct sk_buff *skb)
 	struct sock *sk = skb->sk;
 	struct sk_buff *clone;
 
-	if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
 		return NULL;
 
 	clone = skb_clone(skb, GFP_ATOMIC);
@@ -3915,7 +3915,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 	/* Take a reference to prevent skb_orphan() from freeing the socket,
 	 * but only if the socket refcount is not zero.
 	 */
-	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
 		*skb_hwtstamps(skb) = *hwtstamps;
 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
 		sock_put(sk);
@@ -3997,7 +3997,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
 	/* Take a reference to prevent skb_orphan() from freeing the socket,
 	 * but only if the socket refcount is not zero.
 	 */
-	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
 		err = sock_queue_err_skb(sk, skb);
 		sock_put(sk);
 	}

+ 16 - 16
net/core/sock.c

@@ -1528,7 +1528,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		if (likely(sk->sk_net_refcnt))
 			get_net(net);
 		sock_net_set(sk, net);
-		atomic_set(&sk->sk_wmem_alloc, 1);
+		refcount_set(&sk->sk_wmem_alloc, 1);
 
 		mem_cgroup_sk_alloc(sk);
 		cgroup_sk_alloc(&sk->sk_cgrp_data);
@@ -1552,7 +1552,7 @@ static void __sk_destruct(struct rcu_head *head)
 		sk->sk_destruct(sk);
 
 	filter = rcu_dereference_check(sk->sk_filter,
-				       atomic_read(&sk->sk_wmem_alloc) == 0);
+				       refcount_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
@@ -1602,7 +1602,7 @@ void sk_free(struct sock *sk)
 	 * some packets are still in some tx queue.
 	 * If not null, sock_wfree() will call __sk_free(sk) later
 	 */
-	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sk_free);
@@ -1659,7 +1659,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		/*
 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
 		 */
-		atomic_set(&newsk->sk_wmem_alloc, 1);
+		refcount_set(&newsk->sk_wmem_alloc, 1);
 		atomic_set(&newsk->sk_omem_alloc, 0);
 		sk_init_common(newsk);
 
@@ -1708,7 +1708,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		 * (Documentation/RCU/rculist_nulls.txt for details)
 		 */
 		smp_wmb();
-		atomic_set(&newsk->sk_refcnt, 2);
+		refcount_set(&newsk->sk_refcnt, 2);
 
 		/*
 		 * Increment the counter in the same struct proto as the master
@@ -1787,7 +1787,7 @@ void sock_wfree(struct sk_buff *skb)
 		 * Keep a reference on sk_wmem_alloc, this will be released
 		 * after sk_write_space() call
 		 */
-		atomic_sub(len - 1, &sk->sk_wmem_alloc);
+		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
 		sk->sk_write_space(sk);
 		len = 1;
 	}
@@ -1795,7 +1795,7 @@ void sock_wfree(struct sk_buff *skb)
 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
 	 * could not do because of in-flight packets
 	 */
-	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
@@ -1807,7 +1807,7 @@ void __sock_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 
-	if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
 		__sk_free(sk);
 }
 
@@ -1829,7 +1829,7 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	 * is enough to guarantee sk_free() wont free this sock until
 	 * all in-flight packets are completed
 	 */
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
@@ -1851,8 +1851,8 @@ void skb_orphan_partial(struct sk_buff *skb)
 		) {
 		struct sock *sk = skb->sk;
 
-		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
-			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
 			skb->destructor = sock_efree;
 		}
 	} else {
@@ -1912,7 +1912,7 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
-	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
 		if (skb) {
 			skb_set_owner_w(skb, sk);
@@ -1987,7 +1987,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			break;
@@ -2310,7 +2310,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 		if (sk->sk_type == SOCK_STREAM) {
 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
 				return 1;
-		} else if (atomic_read(&sk->sk_wmem_alloc) <
+		} else if (refcount_read(&sk->sk_wmem_alloc) <
 			   prot->sysctl_wmem[0])
 				return 1;
 	}
@@ -2577,7 +2577,7 @@ static void sock_def_write_space(struct sock *sk)
 	/* Do not wake up a writer until he can make "significant"
 	 * progress.  --DaveM
 	 */
-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
@@ -2687,7 +2687,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	 * (Documentation/RCU/rculist_nulls.txt for details)
 	 */
 	smp_wmb();
-	atomic_set(&sk->sk_refcnt, 1);
+	refcount_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);

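sk_wmem_alloc fits refcount_t semantics only because it is biased: sk_alloc() sets it to 1 rather than 0 (see above), so the count of in-flight transmit bytes never legitimately sits at zero while the socket lives, and the single 1 -> 0 transition in sk_free()/sock_wfree() is what frees the socket. An illustrative sketch of that bias, with hypothetical names and simplified ownership:

struct wmem_owner {
	refcount_t wmem;	/* bytes in flight, biased by +1 */
};

static void wmem_init(struct wmem_owner *w)
{
	refcount_set(&w->wmem, 1);	/* the bias: no true zero while alive */
}

static void wmem_charge(struct wmem_owner *w, unsigned int bytes)
{
	refcount_add(bytes, &w->wmem);	/* always adds onto >= 1 */
}

static void wmem_uncharge_final(struct wmem_owner *w, unsigned int bytes)
{
	/* once the teardown path has dropped the bias, zero becomes
	 * reachable exactly once, and that is when the owner is freed
	 */
	if (refcount_sub_and_test(bytes, &w->wmem))
		kfree(w);
}
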
+ 1 - 1
net/dccp/ipv6.c

@@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 	ireq->ir_iif = sk->sk_bound_dev_if;

+ 1 - 1
net/decnet/dn_neigh.c

@@ -559,7 +559,7 @@ static inline void dn_neigh_format_entry(struct seq_file *seq,
 		   (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
 		   (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
 		   dn->n.nud_state,
-		   atomic_read(&dn->n.refcnt),
+		   refcount_read(&dn->n.refcnt),
 		   dn->blksize,
 		   (dn->n.dev) ? dn->n.dev->name : "?");
 	read_unlock(&n->lock);

+ 1 - 1
net/ipv4/af_inet.c

@@ -150,7 +150,7 @@ void inet_sock_destruct(struct sock *sk)
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);
 

+ 2 - 2
net/ipv4/cipso_ipv4.c

@@ -265,7 +265,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 		    entry->key_len == key_len &&
 		    memcmp(entry->key, key, key_len) == 0) {
 			entry->activity += 1;
-			atomic_inc(&entry->lsm_data->refcount);
+			refcount_inc(&entry->lsm_data->refcount);
 			secattr->cache = entry->lsm_data;
 			secattr->flags |= NETLBL_SECATTR_CACHE;
 			secattr->type = NETLBL_NLTYPE_CIPSOV4;
@@ -332,7 +332,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
 	}
 	entry->key_len = cipso_ptr_len;
 	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
-	atomic_inc(&secattr->cache->refcount);
+	refcount_inc(&secattr->cache->refcount);
 	entry->lsm_data = secattr->cache;
 
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);

+ 1 - 1
net/ipv4/devinet.c

@@ -252,7 +252,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
 	/* Reference in_dev->dev */
 	dev_hold(dev);
 	/* Account for reference dev->ip_ptr (below) */
-	in_dev_hold(in_dev);
+	refcount_set(&in_dev->refcnt, 1);
 
 	err = devinet_sysctl_register(in_dev);
 	if (err) {

+ 1 - 1
net/ipv4/esp4.c

@@ -307,7 +307,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
 		}

+ 5 - 5
net/ipv4/igmp.c

@@ -173,7 +173,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_ma_put(struct ip_mc_list *im)
 {
-	if (atomic_dec_and_test(&im->refcnt)) {
+	if (refcount_dec_and_test(&im->refcnt)) {
 		in_dev_put(im->interface);
 		kfree_rcu(im, rcu);
 	}
@@ -199,7 +199,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
 {
 	spin_lock_bh(&im->lock);
 	if (del_timer(&im->timer))
-		atomic_dec(&im->refcnt);
+		refcount_dec(&im->refcnt);
 	im->tm_running = 0;
 	im->reporter = 0;
 	im->unsolicit_count = 0;
@@ -213,7 +213,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 
 	im->tm_running = 1;
 	if (!mod_timer(&im->timer, jiffies+tv+2))
-		atomic_inc(&im->refcnt);
+		refcount_inc(&im->refcnt);
 }
 
 static void igmp_gq_start_timer(struct in_device *in_dev)
@@ -249,7 +249,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
 			spin_unlock_bh(&im->lock);
 			return;
 		}
-		atomic_dec(&im->refcnt);
+		refcount_dec(&im->refcnt);
 	}
 	igmp_start_timer(im, max_delay);
 	spin_unlock_bh(&im->lock);
@@ -1374,7 +1374,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 	/* initial mode is (EX, empty) */
 	im->sfmode = MCAST_EXCLUDE;
 	im->sfcount[MCAST_EXCLUDE] = 1;
-	atomic_set(&im->refcnt, 1);
+	refcount_set(&im->refcnt, 1);
 	spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
 	setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);

+ 1 - 1
net/ipv4/inet_connection_sock.c

@@ -756,7 +756,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
-	atomic_set(&req->rsk_refcnt, 2 + 1);
+	refcount_set(&req->rsk_refcnt, 2 + 1);
 }
 
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,

+ 7 - 7
net/ipv4/inet_fragment.c

@@ -276,11 +276,11 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	if (del_timer(&fq->timer))
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
 		fq_unlink(fq, f);
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -329,7 +329,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	 */
 	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
-			atomic_inc(&qp->refcnt);
+			refcount_inc(&qp->refcnt);
 			spin_unlock(&hb->chain_lock);
 			qp_in->flags |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
@@ -339,9 +339,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 #endif
 	qp = qp_in;
 	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 
-	atomic_inc(&qp->refcnt);
+	refcount_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &hb->chain);
 
 	spin_unlock(&hb->chain_lock);
@@ -370,7 +370,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
-	atomic_set(&q->refcnt, 1);
+	refcount_set(&q->refcnt, 1);
 
 	return q;
 }
@@ -405,7 +405,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	spin_lock(&hb->chain_lock);
 	hlist_for_each_entry(q, &hb->chain, list) {
 		if (q->net == nf && f->match(q, key)) {
-			atomic_inc(&q->refcnt);
+			refcount_inc(&q->refcnt);
 			spin_unlock(&hb->chain_lock);
 			return q;
 		}

+ 2 - 2
net/ipv4/inet_hashtables.c

@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 /* All sockets share common refcount, but have different destructors */
 void sock_gen_put(struct sock *sk)
 {
-	if (!atomic_dec_and_test(&sk->sk_refcnt))
+	if (!refcount_dec_and_test(&sk->sk_refcnt))
 		return;
 
 	if (sk->sk_state == TCP_TIME_WAIT)
@@ -287,7 +287,7 @@ begin:
 			continue;
 		if (likely(INET_MATCH(sk, net, acookie,
 				      saddr, daddr, ports, dif))) {
-			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 				goto out;
 			if (unlikely(!INET_MATCH(sk, net, acookie,
 						 saddr, daddr, ports, dif))) {

+ 4 - 4
net/ipv4/inet_timewait_sock.c

@@ -76,7 +76,7 @@ void inet_twsk_free(struct inet_timewait_sock *tw)
 
 void inet_twsk_put(struct inet_timewait_sock *tw)
 {
-	if (atomic_dec_and_test(&tw->tw_refcnt))
+	if (refcount_dec_and_test(&tw->tw_refcnt))
 		inet_twsk_free(tw);
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
@@ -131,7 +131,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
 	 * committed into memory all tw fields.
 	 */
-	atomic_set(&tw->tw_refcnt, 4);
+	refcount_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -195,7 +195,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 		 * to a non null value before everything is setup for this
 		 * timewait socket.
 		 */
-		atomic_set(&tw->tw_refcnt, 0);
+		refcount_set(&tw->tw_refcnt, 0);
 
 		__module_get(tw->tw_prot->owner);
 	}
@@ -278,7 +278,7 @@ restart:
 				atomic_read(&twsk_net(tw)->count))
 				continue;
 
-			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
 				continue;
 
 			if (unlikely((tw->tw_family != family) ||

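The timewait hunks keep a deferred-activation idiom that refcount_t supports naturally: the object is allocated with its count at 0, so any concurrent lookup using refcount_inc_not_zero() skips it, and only once every field is initialized does the hashdance publish the real count (4 above: hash chain, timer and bind buckets). A sketch with hypothetical names:

struct tw_like {
	refcount_t refcnt;
};

static struct tw_like *tw_alloc(void)
{
	struct tw_like *tw = kmalloc(sizeof(*tw), GFP_ATOMIC);

	if (tw)
		refcount_set(&tw->refcnt, 0);	/* invisible to inc_not_zero() */
	return tw;
}

static void tw_arm(struct tw_like *tw, unsigned int refs)
{
	/* all fields published; hand out the real reference count */
	refcount_set(&tw->refcnt, refs);
}
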
+ 9 - 9
net/ipv4/inetpeer.c

@@ -115,7 +115,7 @@ static void inetpeer_gc_worker(struct work_struct *work)
 
 		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
 
-		if (!atomic_read(&p->refcnt)) {
+		if (refcount_read(&p->refcnt) == 1) {
 			list_del(&p->gc_list);
 			kmem_cache_free(peer_cachep, p);
 		}
@@ -202,10 +202,11 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
-			 * deleted (refcnt=-1)
+			 * deleted (refcnt=0)
 			 */
-			if (!atomic_add_unless(&u->refcnt, 1, -1))
+			if (!refcount_inc_not_zero(&u->refcnt)) {
 				u = NULL;
+			}
 			return u;
 		}
 		if (cmp == -1)
@@ -382,11 +383,10 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	while (stackptr > stack) {
 		stackptr--;
 		p = rcu_deref_locked(**stackptr, base);
-		if (atomic_read(&p->refcnt) == 0) {
+		if (refcount_read(&p->refcnt) == 1) {
 			smp_rmb();
 			delta = (__u32)jiffies - p->dtime;
-			if (delta >= ttl &&
-			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
+			if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
 				p->gc_next = gchead;
 				gchead = p;
 			}
@@ -432,7 +432,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 relookup:
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		refcount_inc(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
 		return p;
 	}
@@ -444,7 +444,7 @@ relookup:
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
 		p->daddr = *daddr;
-		atomic_set(&p->refcnt, 1);
+		refcount_set(&p->refcnt, 2);
 		atomic_set(&p->rid, 0);
 		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 		p->rate_tokens = 0;
@@ -468,7 +468,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	p->dtime = (__u32)jiffies;
 	smp_mb__before_atomic();
-	atomic_dec(&p->refcnt);
+	refcount_dec(&p->refcnt);
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
 

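The inetpeer hunks are more than a mechanical rename: the old scheme counted users from 0 and marked deleted entries with -1, which refcount_t cannot express. The conversion shifts everything by one, so the tree itself owns a reference: 1 now means "in the tree, no users", 0 means "deleted", a freshly created peer starts at 2 (tree + caller), and GC claims idle entries with refcount_dec_if_one(), which only succeeds on an exact 1 -> 0 transition. Hedged sketch with hypothetical names:

struct peer_like {
	refcount_t refcnt;
};

static struct peer_like *peer_get(struct peer_like *p)
{
	/* 0 now means "deleted"; 1 means "only the tree holds it" */
	return refcount_inc_not_zero(&p->refcnt) ? p : NULL;
}

static bool peer_gc_claim(struct peer_like *p)
{
	/* succeeds only if the tree was the last holder; replaces
	 * the old atomic_cmpxchg(&refcnt, 0, -1) deletion marker
	 */
	return refcount_dec_if_one(&p->refcnt);
}
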
+ 1 - 1
net/ipv4/ip_fragment.c

@@ -312,7 +312,7 @@ static int ip_frag_reinit(struct ipq *qp)
 	unsigned int sum_truesize = 0;
 
 	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
-		atomic_inc(&qp->q.refcnt);
+		refcount_inc(&qp->q.refcnt);
 		return -ETIMEDOUT;
 	}
 

+ 3 - 3
net/ipv4/ip_output.c

@@ -1037,7 +1037,7 @@ alloc_new_skb:
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len + 15, 1,
@@ -1145,7 +1145,7 @@ alloc_new_skb:
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;
@@ -1369,7 +1369,7 @@ ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		skb->len += len;
 		skb->data_len += len;
 		skb->truesize += len;
-		atomic_add(len, &sk->sk_wmem_alloc);
+		refcount_add(len, &sk->sk_wmem_alloc);
 		offset += len;
 		size -= len;
 	}

+ 2 - 2
net/ipv4/ping.c

@@ -290,7 +290,7 @@ void ping_close(struct sock *sk, long timeout)
 {
 	pr_debug("ping_close(sk=%p,sk->num=%u)\n",
 		 inet_sk(sk), inet_sk(sk)->inet_num);
-	pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
+	pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt));
 
 	sk_common_release(sk);
 }
@@ -1127,7 +1127,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 		0, 0L, 0,
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
-		atomic_read(&sp->sk_refcnt), sp,
+		refcount_read(&sp->sk_refcnt), sp,
 		atomic_read(&sp->sk_drops));
 }
 

+ 1 - 1
net/ipv4/raw.c

@@ -1063,7 +1063,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 		0, 0L, 0,
 		from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
-		atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+		refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)

+ 1 - 1
net/ipv4/syncookies.c

@@ -213,7 +213,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
 						 NULL, &own_req);
 	if (child) {
-		atomic_set(&req->rsk_refcnt, 1);
+		refcount_set(&req->rsk_refcnt, 1);
 		tcp_sk(child)->tsoffset = tsoff;
 		sock_rps_save_rxhash(child, skb);
 		inet_csk_reqsk_queue_add(sk, req, child);

+ 2 - 2
net/ipv4/tcp.c

@@ -664,7 +664,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 	return skb->len < size_goal &&
 	       sysctl_tcp_autocorking &&
 	       skb != tcp_write_queue_head(sk) &&
-	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
 static void tcp_push(struct sock *sk, int flags, int mss_now,
@@ -692,7 +692,7 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
 		 */
-		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
 			return;
 	}
 

+ 1 - 1
net/ipv4/tcp_fastopen.c

@@ -214,7 +214,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 
-	atomic_set(&req->rsk_refcnt, 2);
+	refcount_set(&req->rsk_refcnt, 2);
 
 	/* Now finish processing the fastopen child socket. */
 	inet_csk(child)->icsk_af_ops->rebuild_header(child);

+ 2 - 2
net/ipv4/tcp_ipv4.c

@@ -2323,7 +2323,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
 		icsk->icsk_probes_out,
 		sock_i_ino(sk),
-		atomic_read(&sk->sk_refcnt), sk,
+		refcount_read(&sk->sk_refcnt), sk,
 		jiffies_to_clock_t(icsk->icsk_rto),
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
@@ -2349,7 +2349,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-		atomic_read(&tw->tw_refcnt), tw);
+		refcount_read(&tw->tw_refcnt), tw);
 }
 
 #define TMPSZ 150

+ 1 - 1
net/ipv4/tcp_offload.c

@@ -152,7 +152,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
 		sum_truesize += skb->truesize;
-		atomic_add(sum_truesize - gso_skb->truesize,
+		refcount_add(sum_truesize - gso_skb->truesize,
 			   &skb->sk->sk_wmem_alloc);
 	}
 

+ 7 - 8
net/ipv4/tcp_output.c

@@ -861,12 +861,11 @@ void tcp_wfree(struct sk_buff *skb)
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nval, oval;
-	int wmem;
 
 	/* Keep one reference on sk_wmem_alloc.
 	 * Will be released by sk_free() from here or tcp_tasklet_func()
 	 */
-	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
 
 	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
 	 * Wait until our queues (qdisc + devices) are drained.
@@ -875,7 +874,7 @@ void tcp_wfree(struct sk_buff *skb)
 	 * - chance for incoming ACK (processed by another cpu maybe)
 	 *   to migrate this flow (skb->ooo_okay will be eventually set)
 	 */
-	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
@@ -925,7 +924,7 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
 		if (nval != oval)
 			continue;
 
-		if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
 			break;
 		/* queue this socket to tasklet queue */
 		tsq = this_cpu_ptr(&tsq_tasklet);
@@ -1045,7 +1044,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	skb->sk = sk;
 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 	skb_set_hash_from_sk(skb, sk);
-	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
 
@@ -2176,7 +2175,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
 	limit <<= factor;
 
-	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
 		/* Always send the 1st or 2nd skb in write queue.
 		 * No need to wait for TX completion to call us back,
 		 * after softirq/tasklet schedule.
@@ -2192,7 +2191,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		 * test again the condition.
 		 */
 		smp_mb__after_atomic();
-		if (atomic_read(&sk->sk_wmem_alloc) > limit)
+		if (refcount_read(&sk->sk_wmem_alloc) > limit)
 			return true;
 	}
 	return false;
@@ -2812,7 +2811,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	/* Do not sent more than we queued. 1/4 is reserved for possible
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
-	if (atomic_read(&sk->sk_wmem_alloc) >
+	if (refcount_read(&sk->sk_wmem_alloc) >
 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
 		  sk->sk_sndbuf))
 		return -EAGAIN;

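The tcp_wfree() hunk is one place where the conversion changes shape: the old atomic_sub_return() fed its return value into the ksoftirqd heuristic, but refcount_t intentionally has no sub_return. The patch therefore subtracts with refcount_sub_and_test() (asserting it cannot hit zero, since one reference is kept) and re-reads the counter separately; the heuristic tolerates the small window between the two operations. The shape, as a hypothetical helper:

static unsigned int release_keep_one(refcount_t *r, unsigned int n)
{
	WARN_ON(refcount_sub_and_test(n, r));	/* one reference is kept */
	return refcount_read(r);		/* racy; heuristic use only */
}
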
+ 3 - 3
net/ipv4/udp.c

@@ -577,7 +577,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 
 	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
 			       dif, &udp_table, NULL);
-	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 	return sk;
 }
@@ -2242,7 +2242,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
 					     uh->source, iph->saddr, dif);
 	}
 
-	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
+	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
 		return;
 
 	skb->sk = sk;
@@ -2691,7 +2691,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 		0, 0L, 0,
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
-		atomic_read(&sp->sk_refcnt), sp,
+		refcount_read(&sp->sk_refcnt), sp,
 		atomic_read(&sp->sk_drops));
 }
 

+ 2 - 2
net/ipv4/udp_diag.c

@@ -55,7 +55,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
 				req->id.idiag_dport,
 				req->id.idiag_if, tbl, NULL);
 #endif
-	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 	rcu_read_unlock();
 	err = -ENOENT;
@@ -206,7 +206,7 @@ static int __udp_diag_destroy(struct sk_buff *in_skb,
 		return -EINVAL;
 	}
 
-	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 
 	rcu_read_unlock();

+ 2 - 2
net/ipv6/calipso.c

@@ -227,7 +227,7 @@ static int calipso_cache_check(const unsigned char *key,
 		    entry->key_len == key_len &&
 		    memcmp(entry->key, key, key_len) == 0) {
 			entry->activity += 1;
-			atomic_inc(&entry->lsm_data->refcount);
+			refcount_inc(&entry->lsm_data->refcount);
 			secattr->cache = entry->lsm_data;
 			secattr->flags |= NETLBL_SECATTR_CACHE;
 			secattr->type = NETLBL_NLTYPE_CALIPSO;
@@ -296,7 +296,7 @@ static int calipso_cache_add(const unsigned char *calipso_ptr,
 	}
 	}
 	entry->key_len = calipso_ptr_len;
 	entry->key_len = calipso_ptr_len;
 	entry->hash = calipso_map_cache_hash(calipso_ptr, calipso_ptr_len);
 	entry->hash = calipso_map_cache_hash(calipso_ptr, calipso_ptr_len);
-	atomic_inc(&secattr->cache->refcount);
+	refcount_inc(&secattr->cache->refcount);
 	entry->lsm_data = secattr->cache;
 	entry->lsm_data = secattr->cache;
 
 
 	bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1);
 	bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1);

+ 1 - 1
net/ipv6/datagram.c

@@ -1041,6 +1041,6 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 		   0,
 		   sock_i_ino(sp),
-		   atomic_read(&sp->sk_refcnt), sp,
+		   refcount_read(&sp->sk_refcnt), sp,
 		   atomic_read(&sp->sk_drops));
 }

+ 1 - 1
net/ipv6/esp6.c

@@ -275,7 +275,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			skb->data_len += tailen;
 			skb->truesize += tailen;
 			if (sk)
-				atomic_add(tailen, &sk->sk_wmem_alloc);
+				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
 		}
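
The refcount_add() here is byte accounting rather than reference taking: sk_wmem_alloc counts queued transmit bytes and is held nonzero for the life of the sock, so charging tailen more bytes is an addition to a counter already above zero (the checked implementation under CONFIG_REFCOUNT_FULL warns if an add ever sees zero, which is exactly the safety this conversion buys). A sketch of the charging idiom, names illustrative:

	/* Charge extra tail bytes appended to an skb to its owning socket.
	 * Assumes the skb already has a write owner, i.e. sk_wmem_alloc
	 * is known to be nonzero.
	 */
	static void charge_tail_bytes(struct sock *sk, struct sk_buff *skb,
				      unsigned int tailen)
	{
		skb->truesize += tailen;
		refcount_add(tailen, &sk->sk_wmem_alloc);
	}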

+ 2 - 2
net/ipv6/inet6_hashtables.c

@@ -75,7 +75,7 @@ begin:
 			continue;
 		if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
 			continue;
-		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 			goto out;
 
 		if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
@@ -172,7 +172,7 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
 
 	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
 			    ntohs(dport), dif, &refcounted);
-	if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 	return sk;
 }

+ 2 - 2
net/ipv6/ip6_output.c

@@ -1472,7 +1472,7 @@ alloc_new_skb:
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
-				if (atomic_read(&sk->sk_wmem_alloc) <=
+				if (refcount_read(&sk->sk_wmem_alloc) <=
 				    2 * sk->sk_sndbuf)
 					skb = sock_wmalloc(sk,
 							   alloclen + hh_len, 1,
@@ -1581,7 +1581,7 @@ alloc_new_skb:
 			skb->len += copy;
 			skb->data_len += copy;
 			skb->truesize += copy;
-			atomic_add(copy, &sk->sk_wmem_alloc);
+			refcount_add(copy, &sk->sk_wmem_alloc);
 		}
 		offset += copy;
 		length -= copy;

+ 1 - 1
net/ipv6/syncookies.c

@@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 

+ 3 - 3
net/ipv6/tcp_ipv6.c

@@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
 	     np->rxopt.bits.rxinfo ||
 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 	     np->rxopt.bits.rxohlim || np->repflow)) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 }
@@ -1809,7 +1809,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 		   icsk->icsk_probes_out,
 		   sock_i_ino(sp),
-		   atomic_read(&sp->sk_refcnt), sp,
+		   refcount_read(&sp->sk_refcnt), sp,
 		   jiffies_to_clock_t(icsk->icsk_rto),
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
@@ -1842,7 +1842,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   tw->tw_substate, 0, 0,
 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-		   atomic_read(&tw->tw_refcnt), tw);
+		   refcount_read(&tw->tw_refcnt), tw);
 }
 
 static int tcp6_seq_show(struct seq_file *seq, void *v)

+ 2 - 2
net/ipv6/udp.c

@@ -325,7 +325,7 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
 
 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
 				dif, &udp_table, NULL);
-	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 		sk = NULL;
 	return sk;
 }
@@ -916,7 +916,7 @@ static void udp_v6_early_demux(struct sk_buff *skb)
 	else
 		return;
 
-	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
+	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
 		return;
 
 	skb->sk = sk;

+ 1 - 1
net/kcm/kcmproc.c

@@ -162,7 +162,7 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
 		   psock->sk->sk_receive_queue.qlen,
 		   atomic_read(&psock->sk->sk_rmem_alloc),
 		   psock->sk->sk_write_queue.qlen,
-		   atomic_read(&psock->sk->sk_wmem_alloc));
+		   refcount_read(&psock->sk->sk_wmem_alloc));
 
 	if (psock->done)
 		seq_puts(seq, "Done ");

+ 4 - 4
net/key/af_key.c

@@ -109,7 +109,7 @@ static void pfkey_sock_destruct(struct sock *sk)
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
 	atomic_dec(&net_pfkey->socks_nr);
 }
@@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 
 	sock_hold(sk);
 	if (*skb2 == NULL) {
-		if (atomic_read(&skb->users) != 1) {
+		if (refcount_read(&skb->users) != 1) {
 			*skb2 = skb_clone(skb, allocation);
 		} else {
 			*skb2 = skb;
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 		}
 	}
 	if (*skb2 != NULL) {
@@ -3739,7 +3739,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
 	else
 		seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
 			       s,
-			       atomic_read(&s->sk_refcnt),
+			       refcount_read(&s->sk_refcnt),
 			       sk_rmem_alloc_get(s),
 			       sk_wmem_alloc_get(s),
 			       from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
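
The pfkey_broadcast_one() hunk preserves a common skb-distribution idiom: if we hold the only reference (users == 1) the skb can be handed over directly after bumping the count, otherwise it is cloned so the other holders see no change. Reduced to its core as a sketch, with an invented helper name:

	#include <linux/skbuff.h>

	/* Return an skb to deliver: the original if we are its sole user,
	 * a clone otherwise. NULL on allocation failure.
	 */
	static struct sk_buff *share_or_clone(struct sk_buff *skb, gfp_t gfp)
	{
		if (refcount_read(&skb->users) != 1)
			return skb_clone(skb, gfp);	/* shared: copy it */
		refcount_inc(&skb->users);		/* sole owner: share */
		return skb;
	}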

+ 1 - 2
net/l2tp/l2tp_debugfs.c

@@ -144,9 +144,8 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
 		   tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
 		   "");
 	seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
-		   tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
+		   tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
 		   atomic_read(&tunnel->ref_count));
-
 	seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
 		   tunnel->debug,
 		   atomic_long_read(&tunnel->stats.tx_packets),

+ 4 - 4
net/llc/llc_conn.c

@@ -507,7 +507,7 @@ again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_estab_match(sap, daddr, laddr, rc)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
 				     !llc_estab_match(sap, daddr, laddr, rc))) {
@@ -566,7 +566,7 @@ again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_listener_match(sap, laddr, rc)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
 				     !llc_listener_match(sap, laddr, rc))) {
@@ -973,9 +973,9 @@ void llc_sk_free(struct sock *sk)
 	skb_queue_purge(&sk->sk_write_queue);
 	skb_queue_purge(&llc->pdu_unack_q);
 #ifdef LLC_REFCNT_DEBUG
-	if (atomic_read(&sk->sk_refcnt) != 1) {
+	if (refcount_read(&sk->sk_refcnt) != 1) {
 		printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
-			sk, __func__, atomic_read(&sk->sk_refcnt));
+			sk, __func__, refcount_read(&sk->sk_refcnt));
 		printk(KERN_DEBUG "%d LLC sockets are still alive\n",
 			atomic_read(&llc_sock_nr));
 	} else {
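
All three llc lookup hunks (here and in llc_sap.c below) follow the SLAB_TYPESAFE_BY_RCU discipline: the slab may free and reuse a socket while the RCU walk is in progress, so a failed refcount_inc_not_zero() restarts the walk, and even a successful increment must be followed by re-validating the match. Condensed into one sketch, keeping the llc names but omitting the surrounding declarations:

	again:
		sk_nulls_for_each_rcu(rc, node, laddr_hb) {
			if (!llc_dgram_match(sap, laddr, rc))
				continue;
			/* may be mid-free: take a ref only if still live */
			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
				goto again;
			/* slab may have recycled it: re-check after the ref */
			if (unlikely(llc_sk(rc)->sap != sap ||
				     !llc_dgram_match(sap, laddr, rc))) {
				sock_put(rc);
				goto again;
			}
			return rc;	/* referenced, validated match */
		}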

+ 1 - 1
net/llc/llc_sap.c

@@ -329,7 +329,7 @@ again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
 		if (llc_dgram_match(sap, laddr, rc)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
 				     !llc_dgram_match(sap, laddr, rc))) {

+ 2 - 2
net/netfilter/xt_TPROXY.c

@@ -127,7 +127,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
 						    daddr, dport,
 						    in->ifindex);
 
-			if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+			if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 				sk = NULL;
 			/* NOTE: we return listeners even if bound to
 			 * 0.0.0.0, those are filtered out in
@@ -197,7 +197,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
 						   daddr, ntohs(dport),
 						   in->ifindex);
 
-			if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+			if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
 				sk = NULL;
 			/* NOTE: we return listeners even if bound to
 			 * 0.0.0.0, those are filtered out in

+ 7 - 7
net/netlink/af_netlink.c

@@ -372,7 +372,7 @@ static void netlink_sock_destruct(struct sock *sk)
 	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
@@ -575,7 +575,7 @@ static void netlink_remove(struct sock *sk)
 	table = &nl_table[sk->sk_protocol];
 	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
 				    netlink_rhashtable_params)) {
-		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
 		__sock_put(sk);
 	}
 
@@ -691,7 +691,7 @@ static void deferred_put_nlk_sk(struct rcu_head *head)
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
 	struct sock *sk = &nlk->sk;
 
-	if (!atomic_dec_and_test(&sk->sk_refcnt))
+	if (!refcount_dec_and_test(&sk->sk_refcnt))
 		return;
 
 	if (nlk->cb_running && nlk->cb.done) {
@@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	}
 
 	if (dst_group) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
 	}
 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	struct netlink_sock *nlk;
 	int ret;
 
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 
 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
 	if (sk == NULL) {
@@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
 		int exclude_portid = 0;
 
 		if (report) {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			exclude_portid = portid;
 		}
 
@@ -2568,7 +2568,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
 			   sk_rmem_alloc_get(s),
 			   sk_wmem_alloc_get(s),
 			   nlk->cb_running,
-			   atomic_read(&s->sk_refcnt),
+			   refcount_read(&s->sk_refcnt),
 			   atomic_read(&s->sk_drops),
 			   sock_i_ino(s)
 			);
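
deferred_put_nlk_sk() is the only read-modify-write on sk_refcnt in this file, and refcount_dec_and_test() keeps the atomic_dec_and_test() contract: it returns true for exactly one caller, the one that takes the count to zero, so the free runs once. The deferred-free shape, sketched with an illustrative struct and kfree() standing in for the real teardown:

	#include <linux/rcupdate.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t refcnt;
		struct rcu_head rcu;
	};

	/* RCU callback: runs after a grace period; only the thread that
	 * drops the final reference reaches the free.
	 */
	static void obj_deferred_put(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu);

		if (!refcount_dec_and_test(&o->refcnt))
			return;	/* references remain */
		kfree(o);
	}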

+ 7 - 7
net/packet/af_packet.c

@@ -1317,7 +1317,7 @@ static void packet_sock_destruct(struct sock *sk)
 	skb_queue_purge(&sk->sk_error_queue);
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_err("Attempt to release alive packet socket: %p\n", sk);
@@ -1739,7 +1739,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		match->flags = flags;
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
-		atomic_set(&match->sk_ref, 0);
+		refcount_set(&match->sk_ref, 0);
 		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
@@ -1753,10 +1753,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 	    match->prot_hook.type == po->prot_hook.type &&
 	    match->prot_hook.dev == po->prot_hook.dev) {
 		err = -ENOSPC;
-		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
+		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
-			atomic_inc(&match->sk_ref);
+			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
 		}
@@ -1785,7 +1785,7 @@ static struct packet_fanout *fanout_release(struct sock *sk)
 	if (f) {
 		po->fanout = NULL;
 
-		if (atomic_dec_and_test(&f->sk_ref))
+		if (refcount_dec_and_test(&f->sk_ref))
 			list_del(&f->list);
 		else
 			f = NULL;
@@ -2523,7 +2523,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	skb->data_len = to_write;
 	skb->len += to_write;
 	skb->truesize += to_write;
-	atomic_add(to_write, &po->sk.sk_wmem_alloc);
+	refcount_add(to_write, &po->sk.sk_wmem_alloc);
 
 	while (likely(to_write)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
@@ -4495,7 +4495,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq,
 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
 			   s,
-			   atomic_read(&s->sk_refcnt),
+			   refcount_read(&s->sk_refcnt),
 			   s->sk_type,
 			   ntohs(po->num),
 			   po->ifindex,
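
The fanout hunks are the one place on this page where the conversion is not mechanical. sk_ref legitimately starts at zero (a fanout group exists before the first socket joins), and the checked refcount_inc() refuses the 0 -> 1 transition, treating an increment from zero as a potential use-after-free. Because fanout_add() runs with the group serialized under a mutex (fanout_mutex in af_packet.c), the patch can substitute a read-plus-set, which is only safe under such a lock. The shape of the workaround, assuming that locking:

	/* Under a lock that serializes every access to the counter, a
	 * refcount that validly starts at 0 can be bumped with set(read+1);
	 * the checked refcount_inc() would WARN on the 0 -> 1 transition.
	 */
	mutex_lock(&fanout_mutex);
	if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX)
		refcount_set(&match->sk_ref,
			     refcount_read(&match->sk_ref) + 1);
	mutex_unlock(&fanout_mutex);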

+ 3 - 1
net/packet/internal.h

@@ -1,6 +1,8 @@
 #ifndef __PACKET_INTERNAL_H__
 #define __PACKET_INTERNAL_H__
 
+#include <linux/refcount.h>
+
 struct packet_mclist {
 	struct packet_mclist	*next;
 	int			ifindex;
@@ -86,7 +88,7 @@ struct packet_fanout {
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
-	atomic_t		sk_ref;
+	refcount_t		sk_ref;
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 

+ 2 - 2
net/phonet/socket.c

@@ -360,7 +360,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 		return POLLHUP;
 
 	if (sk->sk_state == TCP_ESTABLISHED &&
-		atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
 		atomic_read(&pn->tx_credits))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
@@ -614,7 +614,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
 			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
 			sock_i_ino(sk),
-			atomic_read(&sk->sk_refcnt), sk,
+			refcount_read(&sk->sk_refcnt), sk,
 			atomic_read(&sk->sk_drops));
 	}
 	seq_pad(seq, '\n');

+ 1 - 1
net/rds/tcp_send.c

@@ -202,7 +202,7 @@ void rds_tcp_write_space(struct sock *sk)
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 
 out:

+ 3 - 3
net/rxrpc/af_rxrpc.c

@@ -53,7 +53,7 @@ static void rxrpc_sock_destructor(struct sock *);
  */
 static inline int rxrpc_writable(struct sock *sk)
 {
-	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
 }
 
 /*
@@ -730,7 +730,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
 
 	rxrpc_purge_queue(&sk->sk_receive_queue);
 
-	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 	WARN_ON(!sk_unhashed(sk));
 	WARN_ON(sk->sk_socket);
 
@@ -747,7 +747,7 @@ static int rxrpc_release_sock(struct sock *sk)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sk);
 
-	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
 	/* declare the socket closed for business */
 	sock_orphan(sk);

+ 6 - 6
net/rxrpc/skbuff.c

@@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 
 /*
@@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n = atomic_read(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	}
 }
 
@@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	skb_get(skb);
 }
 
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
 	while ((skb = skb_dequeue((list))) != NULL) {
 		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
 		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-				atomic_read(&skb->users), n, here);
+				refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }

+ 1 - 1
net/sched/em_meta.c

@@ -340,7 +340,7 @@ META_COLLECTOR(int_sk_refcnt)
 		*err = -1;
 		return;
 	}
-	dst->value = atomic_read(&skb->sk->sk_refcnt);
+	dst->value = refcount_read(&skb->sk->sk_refcnt);
 }
 
 META_COLLECTOR(int_sk_rcvbuf)

+ 1 - 1
net/sched/sch_atm.c

@@ -498,7 +498,7 @@ static void sch_atm_dequeue(unsigned long data)
 			ATM_SKB(skb)->vcc = flow->vcc;
 			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
 			       flow->hdr_len);
-			atomic_add(skb->truesize,
+			refcount_add(skb->truesize,
 				   &sk_atm(flow->vcc)->sk_wmem_alloc);
 			/* atm.atm_options are already set by atm_tc_enqueue */
 			flow->vcc->send(flow->vcc, skb);

+ 1 - 1
net/sctp/output.c

@@ -402,7 +402,7 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
 	 * therefore only reserve a single byte to keep socket around until
 	 * the packet has been transmitted.
 	 */
-	atomic_inc(&sk->sk_wmem_alloc);
+	refcount_inc(&sk->sk_wmem_alloc);
 }
 
 static int sctp_packet_pack(struct sctp_packet *packet,
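
The comment retained in the hunk explains why this is refcount_inc() and not refcount_add(): SCTP charges the real data elsewhere, and here reserves a single byte in sk_wmem_alloc purely to pin the socket until the packet has left. The pin/unpin pair looks roughly like the following sketch (helper names invented; in the kernel the release side is sk_free(), which drops sk_wmem_alloc and calls __sk_free() at zero):

	/* Drop the one-byte charge when the skb is freed ... */
	static void pkt_release_owner(struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;

		if (refcount_dec_and_test(&sk->sk_wmem_alloc))
			__sk_free(sk);	/* last byte gone: free the sock */
	}

	/* ... having pinned the transmitting socket with that byte here. */
	static void pkt_set_owner_w(struct sk_buff *skb, struct sock *sk)
	{
		skb->sk = sk;
		skb->destructor = pkt_release_owner;
		refcount_inc(&sk->sk_wmem_alloc);	/* 1 byte keeps sk alive */
	}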

Some files were not shown because too many files changed in this diff