@@ -645,6 +645,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[10] = eax;
 	}
 
+	/* Additional Intel-defined flags: level 0x0000000F */
+	if (c->cpuid_level >= 0x0000000F) {
+		u32 eax, ebx, ecx, edx;
+
+		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
+		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[11] = edx;
+		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+			/* will be overridden if occupancy monitoring exists */
+			c->x86_cache_max_rmid = ebx;
+
+			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
+			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+			c->x86_capability[12] = edx;
+			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
+				c->x86_cache_max_rmid = ecx;
+				c->x86_cache_occ_scale = ebx;
+			}
+		} else {
+			c->x86_cache_max_rmid = -1;
+			c->x86_cache_occ_scale = -1;
+		}
+	}
+
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
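The block added above probes CPUID leaf 0xF in two steps: sub-leaf 0 (ECX=0) reports in EDX whether L3 cache QoS monitoring exists at all and, in EBX, the maximum RMID across all resource types; sub-leaf 1 (ECX=1) then refines this for LLC occupancy monitoring, with ECX carrying the L3-specific maximum RMID and EBX the scale factor that converts raw occupancy counts into bytes. The same enumeration can be reproduced from user space. The sketch below is illustrative only: it assumes a GCC/Clang toolchain providing <cpuid.h>, and the bit positions follow the Intel SDM description of leaf 0xF (they correspond to the X86_FEATURE_CQM_LLC and X86_FEATURE_CQM_OCCUP_LLC tests in the hunk above).

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
    	unsigned int eax, ebx, ecx, edx;

    	/* the highest basic leaf must reach 0xF before leaf 0xF is valid */
    	__cpuid(0, eax, ebx, ecx, edx);
    	if (eax < 0xf) {
    		puts("CPUID leaf 0xF not supported");
    		return 1;
    	}

    	/* sub-leaf 0: EDX bit 1 = L3 QoS monitoring, EBX = max RMID */
    	__cpuid_count(0xf, 0, eax, ebx, ecx, edx);
    	if (!(edx & (1u << 1))) {
    		puts("no L3 cache QoS monitoring");
    		return 1;
    	}
    	printf("max RMID (any resource): %u\n", ebx);

    	/* sub-leaf 1: EDX bit 0 = LLC occupancy monitoring,
    	   ECX = L3 max RMID, EBX = bytes per raw counter unit */
    	__cpuid_count(0xf, 1, eax, ebx, ecx, edx);
    	if (edx & 1u) {
    		printf("L3 max RMID:     %u\n", ecx);
    		printf("occupancy scale: %u bytes/unit\n", ebx);
    	}
    	return 0;
    }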
@@ -833,6 +857,20 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	detect_nopl(c);
 }
 
+static void x86_init_cache_qos(struct cpuinfo_x86 *c)
+{
+	/*
+	 * The heavy lifting of max_rmid and cache_occ_scale is handled in
+	 * get_cpu_cap().  Here we just clamp the boot CPU's max_rmid to the
+	 * minimum seen, in case the CQM bits really aren't there on this CPU.
+	 */
+	if (c != &boot_cpu_data) {
+		boot_cpu_data.x86_cache_max_rmid =
+			min(boot_cpu_data.x86_cache_max_rmid,
+			    c->x86_cache_max_rmid);
+	}
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
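x86_init_cache_qos() is called for every CPU that passes through identify_cpu() (see the next hunk), so after bring-up the boot CPU holds the smallest max_rmid reported by any CPU. Assuming x86_cache_max_rmid is declared as a signed int in the companion header change, as the -1 sentinel suggests, a single CPU without the CQM bits drags the min() down to -1 and effectively disables the feature system-wide; otherwise only the RMID range valid on every CPU survives. A toy model of that reduction, with made-up per-CPU values:

    #include <stdio.h>

    static int min(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
    	/* hypothetical per-CPU values as get_cpu_cap() would set them */
    	int per_cpu_max_rmid[] = { 63, 63, 31, 63 };
    	int boot_max_rmid = per_cpu_max_rmid[0];

    	for (int i = 1; i < 4; i++)
    		boot_max_rmid = min(boot_max_rmid, per_cpu_max_rmid[i]);

    	/* only RMIDs 0..31 are safe to use on all CPUs here */
    	printf("system-wide max RMID: %d\n", boot_max_rmid);
    	return 0;
    }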
@@ -922,6 +960,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 
 	init_hypervisor(c);
 	x86_init_rdrand(c);
+	x86_init_cache_qos(c);
 
 	/*
 	 * Clear/Set all flags overriden by options, need do it