
x86/intel_rdt: Implement "update" mode when writing schemata file

The schemata file can have multiple lines, and it is cumbersome to have to
update all of them when only a few values need to change.

Remove the code that requires the user to provide values for every resource
(in the right order).  If the user provides values for just a few
resources, update those and leave the rest unchanged.

Side benefit: we now check which values were updated and only send IPIs to
cpus that actually have updates.
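
For example, with CDP enabled and four L3 cache domains, a write that names
only one domain of one resource now succeeds and leaves everything else
unchanged (this mirrors the example added to
Documentation/x86/intel_rdt_ui.txt below):

  # cat schemata
  L3DATA:0=fffff;1=fffff;2=fffff;3=fffff
  L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
  # echo "L3DATA:2=3c0;" > schemata
  # cat schemata
  L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
  L3CODE:0=fffff;1=fffff;2=fffff;3=fffff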

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Tested-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: ravi.v.shankar@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: vikas.shivappa@intel.com
Cc: h.peter.anvin@intel.com
Link: http://lkml.kernel.org/r/1491255857-17213-3-git-send-email-vikas.shivappa@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tony Luck, 8 years ago
commit c4026b7b95

Documentation/x86/intel_rdt_ui.txt | +14 -0

@@ -129,6 +129,20 @@ schemata format is always:
 
 	L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
 
+Reading/writing the schemata file
+---------------------------------
+Reading the schemata file will show the state of all resources
+on all domains. When writing you only need to specify those values
+which you wish to change.  E.g.
+
+# cat schemata
+L3DATA:0=fffff;1=fffff;2=fffff;3=fffff
+L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+# echo "L3DATA:2=3c0;" > schemata
+# cat schemata
+L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
+L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+
 Example 1
 ---------
 On a two socket machine (one L3 cache per socket) with just four bits

arch/x86/include/asm/intel_rdt.h | +4 -6

@@ -76,10 +76,7 @@ struct rftype {
  * @min_cbm_bits:		Minimum number of consecutive bits to be set
  *				in a cache bit mask
  * @domains:			All domains for this resource
- * @num_domains:		Number of domains active
  * @msr_base:			Base MSR address for CBMs
- * @tmp_cbms:			Scratch space when updating schemata
- * @num_tmp_cbms:		Number of CBMs in tmp_cbms
  * @cache_level:		Which cache level defines scope of this domain
  * @cbm_idx_multi:		Multiplier of CBM index
  * @cbm_idx_offset:		Offset of CBM index. CBM index is computed by:
@@ -94,10 +91,7 @@ struct rdt_resource {
 	int			min_cbm_bits;
 	u32			max_cbm;
 	struct list_head	domains;
-	int			num_domains;
 	int			msr_base;
-	u32			*tmp_cbms;
-	int			num_tmp_cbms;
 	int			cache_level;
 	int			cbm_idx_multi;
 	int			cbm_idx_offset;
@@ -109,12 +103,16 @@ struct rdt_resource {
  * @id:		unique id for this instance
  * @cpu_mask:	which cpus share this resource
  * @cbm:	array of cache bit masks (indexed by CLOSID)
+ * @new_cbm:	new cbm value to be loaded
+ * @have_new_cbm: did user provide new_cbm for this domain
  */
 struct rdt_domain {
 	struct list_head	list;
 	int			id;
 	struct cpumask		cpu_mask;
 	u32			*cbm;
+	u32			new_cbm;
+	bool			have_new_cbm;
 };
 
 /**

arch/x86/kernel/cpu/intel_rdt.c | +0 -2

@@ -309,7 +309,6 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_set_cpu(cpu, &d->cpu_mask);
 	list_add_tail(&d->list, add_pos);
-	r->num_domains++;
 }
 
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -325,7 +324,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_clear_cpu(cpu, &d->cpu_mask);
 	if (cpumask_empty(&d->cpu_mask)) {
-		r->num_domains--;
 		kfree(d->cbm);
 		list_del(&d->list);
 		kfree(d);

arch/x86/kernel/cpu/intel_rdt_schemata.c | +33 -43

@@ -56,17 +56,21 @@ static bool cbm_validate(unsigned long var, struct rdt_resource *r)
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-static int parse_cbm(char *buf, struct rdt_resource *r)
+static int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
 {
 	unsigned long data;
 	int ret;
 
+	if (d->have_new_cbm)
+		return -EINVAL;
+
 	ret = kstrtoul(buf, 16, &data);
 	if (ret)
 		return ret;
 	if (!cbm_validate(data, r))
 		return -EINVAL;
-	r->tmp_cbms[r->num_tmp_cbms++] = data;
+	d->new_cbm = data;
+	d->have_new_cbm = true;
 
 	return 0;
 }
@@ -74,8 +78,8 @@ static int parse_cbm(char *buf, struct rdt_resource *r)
 /*
  * For each domain in this resource we expect to find a series of:
  *	id=mask
- * separated by ";". The "id" is in decimal, and must appear in the
- * right order.
+ * separated by ";". The "id" is in decimal, and must match one of
+ * the "id"s for this resource.
  */
 static int parse_line(char *line, struct rdt_resource *r)
 {
@@ -83,21 +87,21 @@ static int parse_line(char *line, struct rdt_resource *r)
 	struct rdt_domain *d;
 	unsigned long dom_id;
 
+next:
+	if (!line || line[0] == '\0')
+		return 0;
+	dom = strsep(&line, ";");
+	id = strsep(&dom, "=");
+	if (!dom || kstrtoul(id, 10, &dom_id))
+		return -EINVAL;
 	list_for_each_entry(d, &r->domains, list) {
-		dom = strsep(&line, ";");
-		if (!dom)
-			return -EINVAL;
-		id = strsep(&dom, "=");
-		if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
-			return -EINVAL;
-		if (parse_cbm(dom, r))
-			return -EINVAL;
+		if (d->id == dom_id) {
+			if (parse_cbm(dom, r, d))
+				return -EINVAL;
+			goto next;
+		}
 	}
-
-	/* Any garbage at the end of the line? */
-	if (line && line[0])
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
 
 static int update_domains(struct rdt_resource *r, int closid)
@@ -105,7 +109,7 @@ static int update_domains(struct rdt_resource *r, int closid)
 	struct msr_param msr_param;
 	cpumask_var_t cpu_mask;
 	struct rdt_domain *d;
-	int cpu, idx = 0;
+	int cpu;
 
 	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
@@ -115,9 +119,13 @@ static int update_domains(struct rdt_resource *r, int closid)
 	msr_param.res = r;
 
 	list_for_each_entry(d, &r->domains, list) {
-		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-		d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+		if (d->have_new_cbm && d->new_cbm != d->cbm[closid]) {
+			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+			d->cbm[closid] = d->new_cbm;
+		}
 	}
+	if (cpumask_empty(cpu_mask))
+		goto done;
 	cpu = get_cpu();
 	/* Update CBM on this cpu if it's in cpu_mask. */
 	if (cpumask_test_cpu(cpu, cpu_mask))
@@ -126,6 +134,7 @@ static int update_domains(struct rdt_resource *r, int closid)
 	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
 	put_cpu();
 
+done:
 	free_cpumask_var(cpu_mask);
 
 	return 0;
@@ -135,10 +144,10 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct rdtgroup *rdtgrp;
+	struct rdt_domain *dom;
 	struct rdt_resource *r;
 	char *tok, *resname;
 	int closid, ret = 0;
-	u32 *l3_cbms = NULL;
 
 	/* Valid input requires a trailing newline */
 	if (nbytes == 0 || buf[nbytes - 1] != '\n')
@@ -153,16 +162,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 	closid = rdtgrp->closid;
 
-	/* get scratch space to save all the masks while we validate input */
-	for_each_enabled_rdt_resource(r) {
-		r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
-				      GFP_KERNEL);
-		if (!r->tmp_cbms) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		r->num_tmp_cbms = 0;
-	}
+	for_each_enabled_rdt_resource(r)
+		list_for_each_entry(dom, &r->domains, list)
+			dom->have_new_cbm = false;
 
 	while ((tok = strsep(&buf, "\n")) != NULL) {
 		resname = strsep(&tok, ":");
@@ -185,14 +187,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		}
 	}
 
-	/* Did the parser find all the masks we need? */
-	for_each_enabled_rdt_resource(r) {
-		if (r->num_tmp_cbms != r->num_domains) {
-			ret = -EINVAL;
-			goto out;
-		}
-	}
-
 	for_each_enabled_rdt_resource(r) {
 		ret = update_domains(r, closid);
 		if (ret)
@@ -201,10 +195,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 
 out:
 	rdtgroup_kn_unlock(of->kn);
-	for_each_enabled_rdt_resource(r) {
-		kfree(r->tmp_cbms);
-		r->tmp_cbms = NULL;
-	}
 	return ret ?: nbytes;
 }