@@ -276,11 +276,11 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	if (del_timer(&fq->timer))
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
 		fq_unlink(fq, f);
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -329,7 +329,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	 */
 	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
-			atomic_inc(&qp->refcnt);
+			refcount_inc(&qp->refcnt);
 			spin_unlock(&hb->chain_lock);
 			qp_in->flags |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
@@ -339,9 +339,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 #endif
 	qp = qp_in;
 	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 
-	atomic_inc(&qp->refcnt);
+	refcount_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &hb->chain);
 
 	spin_unlock(&hb->chain_lock);
@@ -370,7 +370,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
-	atomic_set(&q->refcnt, 1);
+	refcount_set(&q->refcnt, 1);
 
 	return q;
 }
@@ -405,7 +405,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	spin_lock(&hb->chain_lock);
 	hlist_for_each_entry(q, &hb->chain, list) {
 		if (q->net == nf && f->match(q, key)) {
-			atomic_inc(&q->refcnt);
+			refcount_inc(&q->refcnt);
 			spin_unlock(&hb->chain_lock);
 			return q;
 		}
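
For reference, a minimal sketch (hypothetical, not part of this patch) of the
alloc/put pattern the conversion moves to. Unlike plain atomic_t, refcount_t
saturates instead of wrapping on overflow and WARNs on increment-from-zero,
so refcount misuse becomes detectable rather than silently exploitable:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* hypothetical stand-in for inet_frag_queue */
	struct frag_example {
		refcount_t refcnt;
	};

	static struct frag_example *frag_example_alloc(void)
	{
		struct frag_example *q = kzalloc(sizeof(*q), GFP_KERNEL);

		if (q)
			refcount_set(&q->refcnt, 1);	/* initial reference */
		return q;
	}

	static void frag_example_put(struct frag_example *q)
	{
		/* drop a reference; free on the last one, as inet_frag_put() does */
		if (refcount_dec_and_test(&q->refcnt))
			kfree(q);
	}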