@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;
 
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		return NULL;
 
 	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
 		n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
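
For reference, the helpers the hunks switch to come from <linux/refcount.h>: refcount_set(), refcount_read() and refcount_dec_and_test() mirror the atomic_t calls they replace, but refcount_t saturates and warns instead of silently wrapping. The snippet below is a minimal sketch of that pattern on a hypothetical "toy_obj"; only the refcount_* calls are real kernel API, everything else is illustrative and not part of the patch.

/* Sketch only, not part of the patch: the refcount_t pattern used above,
 * applied to a hypothetical object. Names other than the refcount_* and
 * slab helpers are made up for illustration.
 */
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_obj {
	refcount_t ref;
};

static struct toy_obj *toy_alloc(gfp_t gfp)
{
	struct toy_obj *obj = kzalloc(sizeof(*obj), gfp);

	if (!obj)
		return NULL;
	refcount_set(&obj->ref, 1);	/* initial reference, cf. __alloc_skb() */
	return obj;
}

static void toy_put(struct toy_obj *obj)
{
	/* Drop one reference and free on the last put,
	 * cf. refcount_dec_and_test() in kfree_skbmem().
	 */
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}

static bool toy_sole_owner(const struct toy_obj *obj)
{
	/* Lockless peek, as in the refcount_read() == 1 fast paths above. */
	return refcount_read(&obj->ref) == 1;
}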