|
@@ -632,7 +632,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 		skb_queue_head_init(&npinfo->txq);
 		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
-		atomic_set(&npinfo->refcnt, 1);
+		refcount_set(&npinfo->refcnt, 1);
 
 		ops = np->dev->netdev_ops;
 		if (ops->ndo_netpoll_setup) {
|
|
@@ -642,7 +642,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 		}
 	} else {
 		npinfo = rtnl_dereference(ndev->npinfo);
-		atomic_inc(&npinfo->refcnt);
+		refcount_inc(&npinfo->refcnt);
 	}
 
 	npinfo->netpoll = np;
|
|
@@ -821,7 +821,7 @@ void __netpoll_cleanup(struct netpoll *np)
 
 	synchronize_srcu(&netpoll_srcu);
 
-	if (atomic_dec_and_test(&npinfo->refcnt)) {
+	if (refcount_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
 		ops = np->dev->netdev_ops;