@@ -400,11 +400,24 @@ static int amd_uncore_cpu_starting(unsigned int cpu)

 	if (amd_uncore_llc) {
 		unsigned int apicid = cpu_data(cpu).apicid;
-		unsigned int nshared;
+		unsigned int nshared, subleaf, prev_eax = 0;

 		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
-		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
-		nshared = ((eax >> 14) & 0xfff) + 1;
+		/*
+		 * Iterate over Cache Topology Definition leaves until no
+		 * more cache descriptions are available.
+		 */
+		for (subleaf = 0; subleaf < 5; subleaf++) {
+			cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx);
+
+			/* EAX[0:4] gives type of cache */
+			if (!(eax & 0x1f))
+				break;
+
+			prev_eax = eax;
+		}
+		nshared = ((prev_eax >> 14) & 0xfff) + 1;
+
 		uncore->id = apicid - (apicid % nshared);

 		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);