@@ -31,6 +31,47 @@
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+	{
+		.name = "L3",
+		.domains = domain_init(RDT_RESOURCE_L3),
+		.msr_base = IA32_L3_CBM_BASE,
+		.min_cbm_bits = 1,
+		.cache_level = 3,
+		.cbm_idx_multi = 1,
+		.cbm_idx_offset = 0
+	},
+	{
+		.name = "L3DATA",
+		.domains = domain_init(RDT_RESOURCE_L3DATA),
+		.msr_base = IA32_L3_CBM_BASE,
+		.min_cbm_bits = 1,
+		.cache_level = 3,
+		.cbm_idx_multi = 2,
+		.cbm_idx_offset = 0
+	},
+	{
+		.name = "L3CODE",
+		.domains = domain_init(RDT_RESOURCE_L3CODE),
+		.msr_base = IA32_L3_CBM_BASE,
+		.min_cbm_bits = 1,
+		.cache_level = 3,
+		.cbm_idx_multi = 2,
+		.cbm_idx_offset = 1
+	},
+	{
+		.name = "L2",
+		.domains = domain_init(RDT_RESOURCE_L2),
+		.msr_base = IA32_L2_CBM_BASE,
+		.min_cbm_bits = 1,
+		.cache_level = 2,
+		.cbm_idx_multi = 1,
+		.cbm_idx_offset = 0
+	},
+};
+
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
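
The cbm_idx_multi/cbm_idx_offset pair in each table entry describes how a CLOSID maps onto a capacity-bitmask MSR relative to msr_base: with CDP the data and code masks for one CLOSID are interleaved, so L3DATA and L3CODE use a multiplier of 2 with offsets 0 and 1. The helper that consumes these fields is not part of this hunk; it would presumably look something like the sketch below (the name cbm_idx and its placement are assumptions, not a quote of the patch series):

/* Sketch only: translate a CLOSID into an MSR index relative to r->msr_base. */
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	/*
	 * Plain L3/L2 CAT: multi = 1, offset = 0 -> one MSR per CLOSID.
	 * CDP L3DATA:      multi = 2, offset = 0 -> even slots hold data masks.
	 * CDP L3CODE:      multi = 2, offset = 1 -> odd slots hold code masks.
	 */
	return closid * r->cbm_idx_multi + r->cbm_idx_offset;
}

For example, CLOSID 1 of L3CODE would land on IA32_L3_CBM_BASE + 3, right next to the L3DATA mask for the same CLOSID at IA32_L3_CBM_BASE + 2.
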
@@ -54,6 +95,7 @@ static inline bool cache_alloc_hsw_probe(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 	    boot_cpu_data.x86 == 6 &&
 	    boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+		struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
 		u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
 		if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
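
This hunk and the next one both touch cache_alloc_hsw_probe(), so the change is easier to follow as the merged result. The reconstruction below is a reading aid, not part of the patch; the error path of the wrmsr_safe() call falls between the two hunks and is assumed to simply return false.

static inline bool cache_alloc_hsw_probe(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
		struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
		u32 l, h, max_cbm = BIT_MASK(20) - 1;	/* 0xfffff: all 20 CBM bits */

		/* Probe by writing a full mask and reading it back */
		if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
			return false;		/* assumed, elided between hunks */
		rdmsr(IA32_L3_CBM_BASE, l, h);

		/* If all the bits were set in MSR, return success */
		if (l != max_cbm)
			return false;

		/* Haswell server CAT parameters are fixed, not CPUID-enumerated */
		r->num_closid = 4;
		r->cbm_len = 20;
		r->max_cbm = max_cbm;
		r->min_cbm_bits = 2;
		r->capable = true;
		r->enabled = true;

		return true;
	}

	return false;
}
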
@@ -61,33 +103,88 @@ static inline bool cache_alloc_hsw_probe(void)
 		rdmsr(IA32_L3_CBM_BASE, l, h);
 
 		/* If all the bits were set in MSR, return success */
-		return l == max_cbm;
+		if (l != max_cbm)
+			return false;
+
+		r->num_closid = 4;
+		r->cbm_len = 20;
+		r->max_cbm = max_cbm;
+		r->min_cbm_bits = 2;
+		r->capable = true;
+		r->enabled = true;
+
+		return true;
 	}
 
 	return false;
 }
 
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+	union cpuid_0x10_1_eax eax;
+	union cpuid_0x10_1_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->cbm_len = eax.split.cbm_len + 1;
+	r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+	r->capable = true;
+	r->enabled = true;
+}
+
+static void rdt_get_cdp_l3_config(int type)
+{
+	struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+	struct rdt_resource *r = &rdt_resources_all[type];
+
+	r->num_closid = r_l3->num_closid / 2;
+	r->cbm_len = r_l3->cbm_len;
+	r->max_cbm = r_l3->max_cbm;
+	r->capable = true;
+	/*
+	 * By default, CDP is disabled. CDP can be enabled by mount parameter
+	 * "cdp" during resctrl file system mount time.
+	 */
+	r->enabled = false;
+}
+
 static inline bool get_rdt_resources(void)
 {
+	bool ret = false;
+
 	if (cache_alloc_hsw_probe())
 		return true;
 
 	if (!boot_cpu_has(X86_FEATURE_RDT_A))
 		return false;
-	if (!boot_cpu_has(X86_FEATURE_CAT_L3))
-		return false;
 
-	return true;
+	if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+		rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+		if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+			rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+			rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+		}
+		ret = true;
+	}
+	if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+		/* CPUID 0x10.2 fields are the same format as 0x10.1 */
+		rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+		ret = true;
+	}
+
+	return ret;
 }
 
 static int __init intel_rdt_late_init(void)
 {
+	struct rdt_resource *r;
+
 	if (!get_rdt_resources())
 		return -ENODEV;
 
-	pr_info("Intel RDT cache allocation detected\n");
-	if (boot_cpu_has(X86_FEATURE_CDP_L3))
-		pr_info("Intel RDT code data prioritization detected\n");
+	for_each_capable_rdt_resource(r)
+		pr_info("Intel RDT %s allocation detected\n", r->name);
 
 	return 0;
 }
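
The new code leans on a few definitions that live in asm/intel_rdt.h rather than in this file: the cpuid_0x10_1_* unions used by rdt_get_config() and the for_each_capable_rdt_resource() iterator used at init time. For orientation, they presumably look roughly like the sketch below; the field widths follow the CPUID.(EAX=10H) sub-leaf layout (EAX[4:0] holds the bitmask length minus 1, EDX[15:0] the highest CLOSID), but the exact names and formatting here are an assumption, not a quote of the header.

/* Sketch of the asm/intel_rdt.h pieces this patch relies on (not verbatim). */

/* CPUID.(EAX=10H, ECX=res):EAX[4:0] encodes the capacity bitmask length - 1 */
union cpuid_0x10_1_eax {
	struct {
		unsigned int cbm_len:5;
	} split;
	unsigned int full;
};

/* CPUID.(EAX=10H, ECX=res):EDX[15:0] encodes the highest supported CLOSID */
union cpuid_0x10_1_edx {
	struct {
		unsigned int cos_max:16;
	} split;
	unsigned int full;
};

/* Visit every entry of rdt_resources_all[] that a probe marked capable */
#define for_each_capable_rdt_resource(r)				\
	for (r = rdt_resources_all;					\
	     r < rdt_resources_all + RDT_NUM_RESOURCES; r++)		\
		if (r->capable)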