@@ -86,9 +86,9 @@ static inline unsigned int __getcpu(void)
 	unsigned int p;
 
 	/*
-	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
-	 * works on all CPUs. This is volatile so that it orders
-	 * correctly wrt barrier() and to keep gcc from cleverly
+	 * Load CPU (and node) number from GDT. LSL is faster than RDTSCP
+	 * and works on all CPUs. This is volatile so that it orders
+	 * correctly with respect to barrier() and to keep GCC from cleverly
 	 * hoisting it out of the calling function.
 	 *
 	 * If RDPID is available, use it.
@@ -96,7 +96,7 @@ static inline unsigned int __getcpu(void)
 	alternative_io ("lsl %[seg],%[p]",
 			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
 			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
+			[p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
 
 	return p;
 }
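
A minimal sketch of how a caller might unpack the value __getcpu() returns,
assuming the usual vDSO getcpu encoding where the CPU number occupies the low
12 bits of the segment limit (or TSC_AUX value) and the node number the bits
above it; the helper name and the open-coded 0xfff mask are illustrative and
not part of this patch:

	/* Split the packed CPU/node value; bit layout assumed as described above. */
	static inline void getcpu_decode(unsigned int p,
					 unsigned int *cpu, unsigned int *node)
	{
		if (cpu)
			*cpu = p & 0xfff;	/* low 12 bits: CPU number */
		if (node)
			*node = p >> 12;	/* remaining bits: node number */
	}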