
x86/intel_rdt/mba_sc: Enable/disable MBA software controller

Currently, the user does memory bandwidth allocation (MBA) by specifying
the bandwidth as a percentage via the resctrl schemata file:
	"/sys/fs/resctrl/schemata"

Add a new mount option "mba_MBps" to enable the user to specify MBA
in MBps:

$mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl
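
With this option, the bandwidth values in the "MB:" lines of the schemata
files are expressed in MBps rather than as a percentage. The option is
accepted only when MBM is supported and the MBA delay scale is linear.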

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/1524263781-14267-3-git-send-email-vikas.shivappa@linux.intel.com
Vikas Shivappa, 7 years ago
commit 19c635ab24
3 changed files, 41 insertions(+), 0 deletions(-)
  1. arch/x86/kernel/cpu/intel_rdt.c          (+8, -0)
  2. arch/x86/kernel/cpu/intel_rdt.h          (+3, -0)
  3. arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (+30, -0)

arch/x86/kernel/cpu/intel_rdt.c (+8, -0)

@@ -230,6 +230,14 @@ static inline void cache_alloc_hsw_probe(void)
 	rdt_alloc_capable = true;
 }
 
+bool is_mba_sc(struct rdt_resource *r)
+{
+	if (!r)
+		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
+
+	return r->membw.mba_sc;
+}
+
 /*
  * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
  * exposed to user interface and the h/w understandable delay values.
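
As a quick illustration of the NULL convention in is_mba_sc() above (a NULL
resource pointer falls back to the MBA resource's own flag), a minimal
stand-alone sketch follows; the struct and array here are simplified
stand-ins for the kernel's rdt_resource and rdt_resources_all, not the real
definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct membw { bool mba_sc; };
struct rdt_resource { struct membw membw; };

enum { RDT_RESOURCE_L3, RDT_RESOURCE_MBA, RDT_NUM_RESOURCES };

static struct rdt_resource rdt_resources_all[RDT_NUM_RESOURCES];

/* Same convention as the patch: NULL means "ask the MBA resource itself". */
static bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

	return r->membw.mba_sc;
}

int main(void)
{
	rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc = true;

	printf("global: %d\n", is_mba_sc(NULL));                                /* prints 1 */
	printf("L3:     %d\n", is_mba_sc(&rdt_resources_all[RDT_RESOURCE_L3])); /* prints 0 */
	return 0;
}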

arch/x86/kernel/cpu/intel_rdt.h (+3, -0)

@@ -259,6 +259,7 @@ struct rdt_cache {
  * @min_bw:		Minimum memory bandwidth percentage user can request
  * @bw_gran:		Granularity at which the memory bandwidth is allocated
  * @delay_linear:	True if memory B/W delay is in linear scale
+ * @mba_sc:		True if MBA software controller(mba_sc) is enabled
  * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
  */
 struct rdt_membw {
@@ -266,6 +267,7 @@ struct rdt_membw {
 	u32		min_bw;
 	u32		bw_gran;
 	u32		delay_linear;
+	bool		mba_sc;
 	u32		*mb_map;
 };
 
@@ -445,6 +447,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
 void mbm_setup_overflow_handler(struct rdt_domain *dom,
 				unsigned long delay_ms);
 void mbm_handle_overflow(struct work_struct *work);
+bool is_mba_sc(struct rdt_resource *r);
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);

arch/x86/kernel/cpu/intel_rdt_rdtgroup.c (+30, -0)

@@ -1005,6 +1005,11 @@ static void l2_qos_cfg_update(void *arg)
 	wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
 }
 
+static inline bool is_mba_linear(void)
+{
+	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
+}
+
 static int set_cache_qos_cfg(int level, bool enable)
 {
 	void (*update)(void *arg);
@@ -1041,6 +1046,25 @@ static int set_cache_qos_cfg(int level, bool enable)
 	return 0;
 }
 
+/*
+ * Enable or disable the MBA software controller
+ * which helps the user specify bandwidth in MBps.
+ * The MBA software controller is supported only if
+ * MBM is supported and MBA is in linear scale.
+ */
+static int set_mba_sc(bool mba_sc)
+{
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
+
+	if (!is_mbm_enabled() || !is_mba_linear() ||
+	    mba_sc == is_mba_sc(r))
+		return -EINVAL;
+
+	r->membw.mba_sc = mba_sc;
+
+	return 0;
+}
+
 static int cdp_enable(int level, int data_type, int code_type)
 {
 	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
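
Reading the guard in set_mba_sc() above: the controller can only be toggled
when MBM is enabled, the MBA delay values are linear, and the request
actually changes the current state; everything else returns -EINVAL. A
stand-alone sketch of just that decision logic, with the kernel's
is_mbm_enabled()/is_mba_linear() checks replaced by stub variables:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Stubs standing in for the kernel-side state (illustration only). */
static bool mbm_enabled = true;   /* what is_mbm_enabled() would report  */
static bool mba_linear  = true;   /* what is_mba_linear() would report   */
static bool mba_sc_state;         /* the current r->membw.mba_sc setting */

/* Mirrors the guard logic of set_mba_sc() in the hunk above. */
static int set_mba_sc(bool mba_sc)
{
	if (!mbm_enabled || !mba_linear || mba_sc == mba_sc_state)
		return -EINVAL;

	mba_sc_state = mba_sc;
	return 0;
}

int main(void)
{
	printf("enable:       %d\n", set_mba_sc(true));   /* 0: state changes      */
	printf("enable again: %d\n", set_mba_sc(true));   /* -EINVAL: already set  */
	printf("disable:      %d\n", set_mba_sc(false));  /* 0: state changes back */
	return 0;
}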
@@ -1123,6 +1147,10 @@ static int parse_rdtgroupfs_options(char *data)
 			ret = cdpl2_enable();
 			if (ret)
 				goto out;
+		} else if (!strcmp(token, "mba_MBps")) {
+			ret = set_mba_sc(true);
+			if (ret)
+				goto out;
 		} else {
 			ret = -EINVAL;
 			goto out;
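
Only the new "mba_MBps" branch of parse_rdtgroupfs_options() is visible in
the hunk above. For orientation, a heavily simplified user-space sketch of
that kind of comma-separated option loop follows (strsep-based; the
cdp/cdpl2 handling and error unwinding of the real function are omitted and
set_mba_sc() is stubbed out):

#define _DEFAULT_SOURCE /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Stub: just report what the real kernel helper would be asked to do. */
static int set_mba_sc(int enable)
{
	printf("set_mba_sc(%d)\n", enable);
	return 0;
}

/* Walk the comma-separated mount options and act on known tokens. */
static int parse_options(char *data)
{
	char *token;

	while ((token = strsep(&data, ",")) != NULL) {
		if (!*token)
			return -EINVAL;

		if (!strcmp(token, "mba_MBps")) {
			if (set_mba_sc(1))
				return -EINVAL;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

int main(void)
{
	char opts[] = "mba_MBps";

	return parse_options(opts);
}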
@@ -1445,6 +1473,8 @@ static void rdt_kill_sb(struct super_block *sb)
 	cpus_read_lock();
 	mutex_lock(&rdtgroup_mutex);
 
+	set_mba_sc(false);
+
 	/*Put everything back to default values. */
 	for_each_alloc_enabled_rdt_resource(r)
 		reset_all_ctrls(r);