[PATCH] for_each_possible_cpu: network codes

for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu with for_each_possible_cpu under /net

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
KAMEZAWA Hiroyuki committed 19 years ago
Parent commit: 6f91204225
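
For reference, a minimal sketch (not part of the patch; the counter and function names are hypothetical) of the pattern the commit message describes: per-CPU state is folded by walking every possible CPU, because a CPU that has since gone offline may still hold a contribution that an online-only walk would silently drop.

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* hypothetical per-CPU counter, one slot per possible CPU */
static DEFINE_PER_CPU(unsigned long, pkt_count);

static unsigned long fold_pkt_count(void)
{
	unsigned long sum = 0;
	int cpu;

	/* walk all possible CPUs, not just the online ones */
	for_each_possible_cpu(cpu)
		sum += per_cpu(pkt_count, cpu);

	return sum;
}

This is the same folding idiom seen below in fold_prot_inuse(), fold_field() and get_counters(): the per-CPU arrays are sized and allocated for every possible CPU, so every reader and every free path must iterate the same set.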

+ 6 - 6
net/bridge/netfilter/ebtables.c

@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl,
 				   		* sizeof(struct ebt_chainstack));
 		if (!newinfo->chainstack)
 			return -ENOMEM;
-		for_each_cpu(i) {
+		for_each_possible_cpu(i) {
 			newinfo->chainstack[i] =
 			   vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
 			if (!newinfo->chainstack[i]) {
@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters,
 	       sizeof(struct ebt_counter) * nentries);
 
 	/* add other counters to those of cpu 0 */
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == 0)
 			continue;
 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len)
 
 	vfree(table->entries);
 	if (table->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->chainstack[i]);
 		vfree(table->chainstack);
 	}
@@ -1054,7 +1054,7 @@ static int do_replace(void __user *user, unsigned int len)
 	vfree(counterstmp);
 	/* can be initialized in translate_table() */
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
@@ -1201,7 +1201,7 @@ int ebt_register_table(struct ebt_table *table)
 	mutex_unlock(&ebt_mutex);
 free_chainstack:
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table)
 	mutex_unlock(&ebt_mutex);
 	vfree(table->private->entries);
 	if (table->private->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->private->chainstack[i]);
 		vfree(table->private->chainstack);
 	}

+ 1 - 1
net/core/dev.c

@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void)
 	 *	Initialise the packet receive queues.
 	 */
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct softnet_data *queue;
 
 		queue = &per_cpu(softnet_data, i);

+ 2 - 2
net/core/flow.c

@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
@@ -361,7 +361,7 @@ static int __init flow_cache_init(void)
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_cache_cpu_prepare(i);
 
 	hotcpu_notifier(flow_cache_cpu, 0);

+ 1 - 1
net/core/neighbour.c

@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
 		memset(&ndst, 0, sizeof(ndst));
 
-		for_each_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			struct neigh_statistics	*st;
 
 			st = per_cpu_ptr(tbl->stats, cpu);

+ 2 - 2
net/core/utils.c

@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
 	unsigned long seed[NR_CPUS];
 
 	get_random_bytes(seed, sizeof(seed));
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}

+ 1 - 1
net/ipv4/icmp.c

@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops)
 	struct inet_sock *inet;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int err;
 
 		err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,

+ 4 - 4
net/ipv4/ipcomp.c

@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void)
 	if (!scratches)
 		return;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 		if (scratch)
 			vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void)
 
 	ipcomp_scratches = scratches;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 	if (!tfms)
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	if (!tfms)
 		goto error;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;

+ 2 - 2
net/ipv4/netfilter/arp_tables.c

@@ -646,7 +646,7 @@ static int translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t,
 			   counters,
 			   &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;

+ 1 - 1
net/ipv4/netfilter/ip_conntrack_core.c

@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void)
 	struct ip_conntrack_ecache *ecache;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(ip_conntrack_ecache, cpu);
 		if (ecache->ct)
 			ip_conntrack_put(ecache->ct);

+ 2 - 2
net/ipv4/netfilter/ip_tables.c

@@ -735,7 +735,7 @@ translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t,
 			  counters,
 			  &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;

+ 2 - 2
net/ipv4/proc.c

@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt)
 	unsigned long res = 0;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
 	}

+ 1 - 1
net/ipv4/route.c

@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
 		memcpy(dst, src, length);
 
 		/* Add the other cpus in, one int at a time */
-		for_each_cpu(i) {
+		for_each_possible_cpu(i) {
 			unsigned int j;
 
 			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;

+ 2 - 2
net/ipv6/icmp.c

@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
 	struct sock *sk;
 	int err, i, j;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
 				       &per_cpu(__icmpv6_socket, i));
 		if (err < 0) {
@@ -763,7 +763,7 @@ void icmpv6_cleanup(void)
 {
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		sock_release(per_cpu(__icmpv6_socket, i));
 	}
 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);

+ 4 - 4
net/ipv6/ipcomp6.c

@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void)
 	if (!scratches)
 		return;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 
 		vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void)
 
 	ipcomp6_scratches = scratches;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
 	if (!tfms)
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
 	if (!tfms)
 		goto error;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;

+ 2 - 2
net/ipv6/netfilter/ip6_tables.c

@@ -788,7 +788,7 @@ translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t,
 			   counters,
 			   &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;

+ 2 - 2
net/ipv6/proc.c

@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt)
         unsigned long res = 0;
         int i;
  
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
                 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
         }

+ 1 - 1
net/netfilter/nf_conntrack_core.c

@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void)
 	struct nf_conntrack_ecache *ecache;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(nf_conntrack_ecache, cpu);
 		if (ecache->ct)
 			nf_ct_put(ecache->ct);

+ 2 - 2
net/netfilter/x_tables.c

@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 
 	newinfo->size = size;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (size <= PAGE_SIZE)
 			newinfo->entries[cpu] = kmalloc_node(size,
 							GFP_KERNEL,
@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info)
 {
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (info->size <= PAGE_SIZE)
 			kfree(info->entries[cpu]);
 		else

+ 1 - 1
net/sctp/proc.c

@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr)
 	unsigned long res = 0;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res +=
 		    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
 					 sizeof (unsigned long) * nr));

+ 1 - 1
net/socket.c

@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq)
 	int cpu;
 	int counter = 0;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 
 	/* It can be negative, by the way. 8) */