@@ -205,7 +205,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
  */
 static struct sock *icmp_sk(struct net *net)
 {
-	return net->ipv4.icmp_sk[smp_processor_id()];
+	return *this_cpu_ptr(net->ipv4.icmp_sk);
 }
 
 static inline struct sock *icmp_xmit_lock(struct net *net)
@@ -1140,8 +1140,8 @@ static void __net_exit icmp_sk_exit(struct net *net)
 	int i;
 
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-	kfree(net->ipv4.icmp_sk);
+		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+	free_percpu(net->ipv4.icmp_sk);
 	net->ipv4.icmp_sk = NULL;
 }
 
@@ -1149,9 +1149,8 @@ static int __net_init icmp_sk_init(struct net *net)
 {
 	int i, err;
 
-	net->ipv4.icmp_sk =
-		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
-	if (net->ipv4.icmp_sk == NULL)
+	net->ipv4.icmp_sk = alloc_percpu(struct sock *);
+	if (!net->ipv4.icmp_sk)
 		return -ENOMEM;
 
 	for_each_possible_cpu(i) {
@@ -1162,7 +1161,7 @@ static int __net_init icmp_sk_init(struct net *net)
 		if (err < 0)
 			goto fail;
 
-		net->ipv4.icmp_sk[i] = sk;
+		*per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
 
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff/skb_shared_info struct overhead.
@@ -1203,8 +1202,8 @@ static int __net_init icmp_sk_init(struct net *net)
 
 fail:
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-	kfree(net->ipv4.icmp_sk);
+		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+	free_percpu(net->ipv4.icmp_sk);
 	return err;
 }