
Merge branch 'for-next/perf' into aarch64/for-next/core

Merge in ARM PMU and perf updates for 4.15:

  - Support for the Statistical Profiling Extension
  - Support for Hisilicon's SoC PMU

Signed-off-by: Will Deacon <will.deacon@arm.com>
Will Deacon 7 years ago
Parent
Commit
1e0c661f05

+ 20 - 0
Documentation/devicetree/bindings/arm/spe-pmu.txt

@@ -0,0 +1,20 @@
+* ARMv8.2 Statistical Profiling Extension (SPE) Performance Monitor Units (PMU)
+
+ARMv8.2 introduces the optional Statistical Profiling Extension for collecting
+performance sample data using an in-memory trace buffer.
+
+** SPE Required properties:
+
+- compatible : should be one of:
+	       "arm,statistical-profiling-extension-v1"
+
+- interrupts : Exactly 1 PPI must be listed. For heterogeneous systems where
+               SPE is only supported on a subset of the CPUs, please consult
+	       the arm,gic-v3 binding for details on describing a PPI partition.
+
+** Example:
+
+spe-pmu {
+        compatible = "arm,statistical-profiling-extension-v1";
+        interrupts = <GIC_PPI 05 IRQ_TYPE_LEVEL_HIGH &part1>;
+};

+ 53 - 0
Documentation/perf/hisi-pmu.txt

@@ -0,0 +1,53 @@
+HiSilicon SoC uncore Performance Monitoring Unit (PMU)
+======================================================
+The HiSilicon SoC chip includes various independent system device PMUs,
+such as the L3 cache (L3C), Hydra Home Agent (HHA) and DDR controller
+(DDRC). Each of these PMUs has its own hardware logic to gather
+statistics and performance information.
+
+The HiSilicon SoC encapsulates multiple CPU and IO dies. Each CPU cluster
+(CCL) is made up of 4 CPU cores sharing one L3 cache. Each CPU die, called
+a Super CPU cluster (SCCL), is made up of 6 CCLs, and each SCCL has two
+HHAs (0 - 1) and four DDRCs (0 - 3).
+
+HiSilicon SoC uncore PMU driver
+-------------------------------
+Each device PMU has separate registers for event counting, control and
+interrupt, and the PMU driver registers a separate perf PMU for each of
+L3C, HHA and DDRC. The available events and configuration options are
+described in sysfs; see:
+/sys/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>/, or
+/sys/bus/event_source/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>.
+The "perf list" command lists the available events from sysfs.
+
+Each L3C, HHA and DDRC is registered as a separate PMU with perf. The PMU
+name appears in the event listing as hisi_sccl<sccl-id>_<module><index-id>,
+where "sccl-id" is the identifier of the SCCL and "index-id" is the index
+of the module.
+e.g. hisi_sccl3_l3c0/rd_hit_cpipe is the READ_HIT_CPIPE event of L3C
+index #0 in SCCL ID #3.
+e.g. hisi_sccl1_hha0/rx_operations is the RX_OPERATIONS event of HHA
+index #0 in SCCL ID #1.
+
+The driver also provides a "cpumask" sysfs attribute, which shows the CPU core
+ID used to count the uncore PMU event.
+
+Example usage of perf:
+$# perf list
+hisi_sccl3_l3c0/rd_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl3_l3c0/wr_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl1_l3c0/rd_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl1_l3c0/wr_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+
+$# perf stat -a -e hisi_sccl3_l3c0/rd_hit_cpipe/ sleep 5
+$# perf stat -a -e hisi_sccl3_l3c0/config=0x02/ sleep 5
+
+The current driver does not support sampling, so "perf record" is
+unsupported. Attaching to a task is also unsupported, as the events are
+all uncore.
+
+Note: Please contact the maintainer if a complete list of the events
+supported by the PMU devices in the SoC, or further information about
+them, is needed.

+ 7 - 0
MAINTAINERS

@@ -6241,6 +6241,13 @@ S:	Maintained
 F:	drivers/net/ethernet/hisilicon/
 F:	Documentation/devicetree/bindings/net/hisilicon*.txt
 
+HISILICON PMU DRIVER
+M:	Shaokun Zhang <zhangshaokun@hisilicon.com>
+W:	http://www.hisilicon.com
+S:	Supported
+F:	drivers/perf/hisilicon
+F:	Documentation/perf/hisi-pmu.txt
+
 HISILICON ROCE DRIVER
 M:	Lijun Ou <oulijun@huawei.com>
 M:	Wei Hu(Xavier) <xavier.huwei@huawei.com>

+ 2 - 0
arch/arm64/include/asm/barrier.h

@@ -31,6 +31,8 @@
 #define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
 #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
 
+#define psb_csync()	asm volatile("hint #17" : : : "memory")
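+/* "hint #17" is the PSB CSYNC encoding; hint instructions execute as NOPs on CPUs without SPE */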
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)

+ 93 - 0
arch/arm64/include/asm/sysreg.h

@@ -172,6 +172,99 @@
 #define SYS_FAR_EL1			sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1			sys_reg(3, 0, 7, 4, 0)
 
+/*** Statistical Profiling Extension ***/
+/* ID registers */
+#define SYS_PMSIDR_EL1			sys_reg(3, 0, 9, 9, 7)
+#define SYS_PMSIDR_EL1_FE_SHIFT		0
+#define SYS_PMSIDR_EL1_FT_SHIFT		1
+#define SYS_PMSIDR_EL1_FL_SHIFT		2
+#define SYS_PMSIDR_EL1_ARCHINST_SHIFT	3
+#define SYS_PMSIDR_EL1_LDS_SHIFT	4
+#define SYS_PMSIDR_EL1_ERND_SHIFT	5
+#define SYS_PMSIDR_EL1_INTERVAL_SHIFT	8
+#define SYS_PMSIDR_EL1_INTERVAL_MASK	0xfUL
+#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT	12
+#define SYS_PMSIDR_EL1_MAXSIZE_MASK	0xfUL
+#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT	16
+#define SYS_PMSIDR_EL1_COUNTSIZE_MASK	0xfUL
+
+#define SYS_PMBIDR_EL1			sys_reg(3, 0, 9, 10, 7)
+#define SYS_PMBIDR_EL1_ALIGN_SHIFT	0
+#define SYS_PMBIDR_EL1_ALIGN_MASK	0xfU
+#define SYS_PMBIDR_EL1_P_SHIFT		4
+#define SYS_PMBIDR_EL1_F_SHIFT		5
+
+/* Sampling controls */
+#define SYS_PMSCR_EL1			sys_reg(3, 0, 9, 9, 0)
+#define SYS_PMSCR_EL1_E0SPE_SHIFT	0
+#define SYS_PMSCR_EL1_E1SPE_SHIFT	1
+#define SYS_PMSCR_EL1_CX_SHIFT		3
+#define SYS_PMSCR_EL1_PA_SHIFT		4
+#define SYS_PMSCR_EL1_TS_SHIFT		5
+#define SYS_PMSCR_EL1_PCT_SHIFT		6
+
+#define SYS_PMSCR_EL2			sys_reg(3, 4, 9, 9, 0)
+#define SYS_PMSCR_EL2_E0HSPE_SHIFT	0
+#define SYS_PMSCR_EL2_E2SPE_SHIFT	1
+#define SYS_PMSCR_EL2_CX_SHIFT		3
+#define SYS_PMSCR_EL2_PA_SHIFT		4
+#define SYS_PMSCR_EL2_TS_SHIFT		5
+#define SYS_PMSCR_EL2_PCT_SHIFT		6
+
+#define SYS_PMSICR_EL1			sys_reg(3, 0, 9, 9, 2)
+
+#define SYS_PMSIRR_EL1			sys_reg(3, 0, 9, 9, 3)
+#define SYS_PMSIRR_EL1_RND_SHIFT	0
+#define SYS_PMSIRR_EL1_INTERVAL_SHIFT	8
+#define SYS_PMSIRR_EL1_INTERVAL_MASK	0xffffffUL
+
+/* Filtering controls */
+#define SYS_PMSFCR_EL1			sys_reg(3, 0, 9, 9, 4)
+#define SYS_PMSFCR_EL1_FE_SHIFT		0
+#define SYS_PMSFCR_EL1_FT_SHIFT		1
+#define SYS_PMSFCR_EL1_FL_SHIFT		2
+#define SYS_PMSFCR_EL1_B_SHIFT		16
+#define SYS_PMSFCR_EL1_LD_SHIFT		17
+#define SYS_PMSFCR_EL1_ST_SHIFT		18
+
+#define SYS_PMSEVFR_EL1			sys_reg(3, 0, 9, 9, 5)
+#define SYS_PMSEVFR_EL1_RES0		0x0000ffff00ff0f55UL
+
+#define SYS_PMSLATFR_EL1		sys_reg(3, 0, 9, 9, 6)
+#define SYS_PMSLATFR_EL1_MINLAT_SHIFT	0
+
+/* Buffer controls */
+#define SYS_PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
+#define SYS_PMBLIMITR_EL1_E_SHIFT	0
+#define SYS_PMBLIMITR_EL1_FM_SHIFT	1
+#define SYS_PMBLIMITR_EL1_FM_MASK	0x3UL
+#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ	(0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
+
+#define SYS_PMBPTR_EL1			sys_reg(3, 0, 9, 10, 1)
+
+/* Buffer error reporting */
+#define SYS_PMBSR_EL1			sys_reg(3, 0, 9, 10, 3)
+#define SYS_PMBSR_EL1_COLL_SHIFT	16
+#define SYS_PMBSR_EL1_S_SHIFT		17
+#define SYS_PMBSR_EL1_EA_SHIFT		18
+#define SYS_PMBSR_EL1_DL_SHIFT		19
+#define SYS_PMBSR_EL1_EC_SHIFT		26
+#define SYS_PMBSR_EL1_EC_MASK		0x3fUL
+
+#define SYS_PMBSR_EL1_EC_BUF		(0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S1	(0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S2	(0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
+
+#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT	0
+#define SYS_PMBSR_EL1_FAULT_FSC_MASK	0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_SHIFT	0
+#define SYS_PMBSR_EL1_BUF_BSC_MASK	0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_FULL	(0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+
+/*** End of Statistical Profiling Extension ***/
+
 #define SYS_PMINTENSET_EL1		sys_reg(3, 0, 9, 14, 1)
 #define SYS_PMINTENCLR_EL1		sys_reg(3, 0, 9, 14, 2)
 

+ 12 - 5
arch/arm64/kernel/head.S

@@ -480,14 +480,21 @@ set_hcr:
 
 	/* Statistical profiling */
 	ubfx	x0, x1, #32, #4			// Check ID_AA64DFR0_EL1 PMSVer
-	cbz	x0, 6f				// Skip if SPE not present
-	cbnz	x2, 5f				// VHE?
+	cbz	x0, 7f				// Skip if SPE not present
+	cbnz	x2, 6f				// VHE?
+	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
+	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+	cbnz	x4, 5f				// then permit sampling of physical
+	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
+		      1 << SYS_PMSCR_EL2_PA_SHIFT)
+	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
+5:
 	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
 	orr	x3, x3, x1			// If we don't have VHE, then
-	b	6f				// use EL1&0 translation.
-5:						// For VHE, use EL2 translation
+	b	7f				// use EL1&0 translation.
+6:						// For VHE, use EL2 translation
 	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
-6:
+7:
 	msr	mdcr_el2, x3			// Configure debug traps
 
 	/* Stage-2 translation */

+ 7 - 17
arch/arm64/kvm/hyp/debug-sr.c

@@ -65,16 +65,6 @@
 	default:	write_debug(ptr[0], reg, 0);			\
 	}
 
-#define PMSCR_EL1		sys_reg(3, 0, 9, 9, 0)
-
-#define PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
-#define PMBLIMITR_EL1_E		BIT(0)
-
-#define PMBIDR_EL1		sys_reg(3, 0, 9, 10, 7)
-#define PMBIDR_EL1_P		BIT(4)
-
-#define psb_csync()		asm volatile("hint #17")
-
 static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
 {
 	/* The vcpu can run, but it can't hide. */
@@ -90,18 +80,18 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 		return;
 
 	/* Yes; is it owned by EL3? */
-	reg = read_sysreg_s(PMBIDR_EL1);
-	if (reg & PMBIDR_EL1_P)
+	reg = read_sysreg_s(SYS_PMBIDR_EL1);
+	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
 		return;
 
 	/* No; is the host actually using the thing? */
-	reg = read_sysreg_s(PMBLIMITR_EL1);
-	if (!(reg & PMBLIMITR_EL1_E))
+	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
+	if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
 		return;
 
 	/* Yes; save the control register and disable data generation */
-	*pmscr_el1 = read_sysreg_s(PMSCR_EL1);
-	write_sysreg_s(0, PMSCR_EL1);
+	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
+	write_sysreg_s(0, SYS_PMSCR_EL1);
 	isb();
 
 	/* Now drain all buffered data to memory */
@@ -122,7 +112,7 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
 	isb();
 
 	/* Re-enable data generation */
-	write_sysreg_s(pmscr_el1, PMSCR_EL1);
+	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
 }
 
 void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,

+ 15 - 0
drivers/perf/Kconfig

@@ -17,6 +17,13 @@ config ARM_PMU_ACPI
 	depends on ARM_PMU && ACPI
 	def_bool y
 
+config HISI_PMU
+	bool "HiSilicon SoC PMU"
+	depends on ARM64 && ACPI
+	help
+	  Support for HiSilicon SoC uncore performance monitoring
+	  unit (PMU), such as: L3C, HHA and DDRC.
+
 config QCOM_L2_PMU
 	bool "Qualcomm Technologies L2-cache PMU"
 	depends on ARCH_QCOM && ARM64 && ACPI
@@ -43,4 +50,12 @@ config XGENE_PMU
         help
           Say y if you want to use APM X-Gene SoC performance monitors.
 
+config ARM_SPE_PMU
+	tristate "Enable support for the ARMv8.2 Statistical Profiling Extension"
+	depends on PERF_EVENTS && ARM64
+	help
+	  Enable perf support for the ARMv8.2 Statistical Profiling
+	  Extension, which provides periodic sampling of operations in
+	  the CPU pipeline and reports this via the perf AUX interface.
+
 endmenu

+ 2 - 0
drivers/perf/Makefile

@@ -1,5 +1,7 @@
 obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
 obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_HISI_PMU) += hisilicon/
 obj-$(CONFIG_QCOM_L2_PMU)	+= qcom_l2_pmu.o
 obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o

+ 1248 - 0
drivers/perf/arm_spe_pmu.c

@@ -0,0 +1,1248 @@
+/*
+ * Perf support for the Statistical Profiling Extension, introduced as
+ * part of ARMv8.2.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2016 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define PMUNAME					"arm_spe"
+#define DRVNAME					PMUNAME "_pmu"
+#define pr_fmt(fmt)				DRVNAME ": " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/sysreg.h>
+
+#define ARM_SPE_BUF_PAD_BYTE			0
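+/* A 0x00 byte is decoded by SPE tooling as a padding packet, so padded regions are skipped */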
+
+struct arm_spe_pmu_buf {
+	int					nr_pages;
+	bool					snapshot;
+	void					*base;
+};
+
+struct arm_spe_pmu {
+	struct pmu				pmu;
+	struct platform_device			*pdev;
+	cpumask_t				supported_cpus;
+	struct hlist_node			hotplug_node;
+
+	int					irq; /* PPI */
+
+	u16					min_period;
+	u16					counter_sz;
+
+#define SPE_PMU_FEAT_FILT_EVT			(1UL << 0)
+#define SPE_PMU_FEAT_FILT_TYP			(1UL << 1)
+#define SPE_PMU_FEAT_FILT_LAT			(1UL << 2)
+#define SPE_PMU_FEAT_ARCH_INST			(1UL << 3)
+#define SPE_PMU_FEAT_LDS			(1UL << 4)
+#define SPE_PMU_FEAT_ERND			(1UL << 5)
+#define SPE_PMU_FEAT_DEV_PROBED			(1UL << 63)
+	u64					features;
+
+	u16					max_record_sz;
+	u16					align;
+	struct perf_output_handle __percpu	*handle;
+};
+
+#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
+
+/* Convert a free-running index from perf into an SPE buffer offset */
+#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
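+/*
+ * For example, with nr_pages == 8 and 4KiB pages, the buffer wraps every
+ * 32768 bytes, so a free-running index of 40960 maps to offset 8192.
+ */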
+
+/* Keep track of our dynamic hotplug state */
+static enum cpuhp_state arm_spe_pmu_online;
+
+enum arm_spe_pmu_buf_fault_action {
+	SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
+	SPE_PMU_BUF_FAULT_ACT_FATAL,
+	SPE_PMU_BUF_FAULT_ACT_OK,
+};
+
+/* This sysfs gunk was really good fun to write. */
+enum arm_spe_pmu_capabilities {
+	SPE_PMU_CAP_ARCH_INST = 0,
+	SPE_PMU_CAP_ERND,
+	SPE_PMU_CAP_FEAT_MAX,
+	SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
+	SPE_PMU_CAP_MIN_IVAL,
+};
+
+static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
+	[SPE_PMU_CAP_ARCH_INST]	= SPE_PMU_FEAT_ARCH_INST,
+	[SPE_PMU_CAP_ERND]	= SPE_PMU_FEAT_ERND,
+};
+
+static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+{
+	if (cap < SPE_PMU_CAP_FEAT_MAX)
+		return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
+
+	switch (cap) {
+	case SPE_PMU_CAP_CNT_SZ:
+		return spe_pmu->counter_sz;
+	case SPE_PMU_CAP_MIN_IVAL:
+		return spe_pmu->min_period;
+	default:
+		WARN(1, "unknown cap %d\n", cap);
+	}
+
+	return 0;
+}
+
+static ssize_t arm_spe_pmu_cap_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+	struct dev_ext_attribute *ea =
+		container_of(attr, struct dev_ext_attribute, attr);
+	int cap = (long)ea->var;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+#define SPE_EXT_ATTR_ENTRY(_name, _func, _var)				\
+	&((struct dev_ext_attribute[]) {				\
+		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var }	\
+	})[0].attr.attr
+
+#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var)				\
+	SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+
+static struct attribute *arm_spe_pmu_cap_attr[] = {
+	SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
+	SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
+	SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
+	SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+	NULL,
+};
+
+static struct attribute_group arm_spe_pmu_cap_group = {
+	.name	= "caps",
+	.attrs	= arm_spe_pmu_cap_attr,
+};
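+/*
+ * These capability attributes appear under a "caps" directory of the PMU
+ * device, e.g. /sys/bus/event_source/devices/arm_spe_0/caps/min_interval
+ * (assuming the first probed instance is named arm_spe_0; see the naming
+ * in arm_spe_pmu_perf_init() below).
+ */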
+
+/* User ABI */
+#define ATTR_CFG_FLD_ts_enable_CFG		config	/* PMSCR_EL1.TS */
+#define ATTR_CFG_FLD_ts_enable_LO		0
+#define ATTR_CFG_FLD_ts_enable_HI		0
+#define ATTR_CFG_FLD_pa_enable_CFG		config	/* PMSCR_EL1.PA */
+#define ATTR_CFG_FLD_pa_enable_LO		1
+#define ATTR_CFG_FLD_pa_enable_HI		1
+#define ATTR_CFG_FLD_pct_enable_CFG		config	/* PMSCR_EL1.PCT */
+#define ATTR_CFG_FLD_pct_enable_LO		2
+#define ATTR_CFG_FLD_pct_enable_HI		2
+#define ATTR_CFG_FLD_jitter_CFG			config	/* PMSIRR_EL1.RND */
+#define ATTR_CFG_FLD_jitter_LO			16
+#define ATTR_CFG_FLD_jitter_HI			16
+#define ATTR_CFG_FLD_branch_filter_CFG		config	/* PMSFCR_EL1.B */
+#define ATTR_CFG_FLD_branch_filter_LO		32
+#define ATTR_CFG_FLD_branch_filter_HI		32
+#define ATTR_CFG_FLD_load_filter_CFG		config	/* PMSFCR_EL1.LD */
+#define ATTR_CFG_FLD_load_filter_LO		33
+#define ATTR_CFG_FLD_load_filter_HI		33
+#define ATTR_CFG_FLD_store_filter_CFG		config	/* PMSFCR_EL1.ST */
+#define ATTR_CFG_FLD_store_filter_LO		34
+#define ATTR_CFG_FLD_store_filter_HI		34
+
+#define ATTR_CFG_FLD_event_filter_CFG		config1	/* PMSEVFR_EL1 */
+#define ATTR_CFG_FLD_event_filter_LO		0
+#define ATTR_CFG_FLD_event_filter_HI		63
+
+#define ATTR_CFG_FLD_min_latency_CFG		config2	/* PMSLATFR_EL1.MINLAT */
+#define ATTR_CFG_FLD_min_latency_LO		0
+#define ATTR_CFG_FLD_min_latency_HI		11
+
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)				\
+	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)				\
+	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name)					\
+	PMU_FORMAT_ATTR(name,						\
+	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,			\
+			     ATTR_CFG_FLD_##name##_LO,			\
+			     ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)				\
+	((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name)					\
+	_ATTR_CFG_GET_FLD(attr,						\
+			  ATTR_CFG_FLD_##name##_CFG,			\
+			  ATTR_CFG_FLD_##name##_LO,			\
+			  ATTR_CFG_FLD_##name##_HI)
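+/*
+ * For example, ATTR_CFG_GET_FLD(attr, min_latency) expands to
+ * ((attr->config2 >> 0) & GENMASK(11, 0)), i.e. the low 12 bits of config2.
+ */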
+
+GEN_PMU_FORMAT_ATTR(ts_enable);
+GEN_PMU_FORMAT_ATTR(pa_enable);
+GEN_PMU_FORMAT_ATTR(pct_enable);
+GEN_PMU_FORMAT_ATTR(jitter);
+GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(event_filter);
+GEN_PMU_FORMAT_ATTR(min_latency);
+
+static struct attribute *arm_spe_pmu_formats_attr[] = {
+	&format_attr_ts_enable.attr,
+	&format_attr_pa_enable.attr,
+	&format_attr_pct_enable.attr,
+	&format_attr_jitter.attr,
+	&format_attr_branch_filter.attr,
+	&format_attr_load_filter.attr,
+	&format_attr_store_filter.attr,
+	&format_attr_event_filter.attr,
+	&format_attr_min_latency.attr,
+	NULL,
+};
+
+static struct attribute_group arm_spe_pmu_format_group = {
+	.name	= "format",
+	.attrs	= arm_spe_pmu_formats_attr,
+};
+
+static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+	return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
+}
+static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *arm_spe_pmu_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group arm_spe_pmu_group = {
+	.attrs	= arm_spe_pmu_attrs,
+};
+
+static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
+	&arm_spe_pmu_group,
+	&arm_spe_pmu_cap_group,
+	&arm_spe_pmu_format_group,
+	NULL,
+};
+
+/* Convert between user ABI and register values */
+static u64 arm_spe_event_to_pmscr(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	u64 reg = 0;
+
+	reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
+	reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
+	reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
+
+	if (!attr->exclude_user)
+		reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
+
+	if (!attr->exclude_kernel)
+		reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
+
+	if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN))
+		reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
+
+	return reg;
+}
+
+static void arm_spe_event_sanitise_period(struct perf_event *event)
+{
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+	u64 period = event->hw.sample_period;
+	u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
+			 << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
+
+	if (period < spe_pmu->min_period)
+		period = spe_pmu->min_period;
+	else if (period > max_period)
+		period = max_period;
+	else
+		period &= max_period;
+
+	event->hw.sample_period = period;
+}
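+
+/*
+ * max_period above is 0xffffff00 (PMSIRR.INTERVAL shifted into place), so an
+ * in-range period such as 1000 has its low byte cleared, yielding 768 (0x300).
+ */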
+
+static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	u64 reg = 0;
+
+	arm_spe_event_sanitise_period(event);
+
+	reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
+	reg |= event->hw.sample_period;
+
+	return reg;
+}
+
+static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	u64 reg = 0;
+
+	reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
+	reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
+	reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
+
+	if (reg)
+		reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
+
+	if (ATTR_CFG_GET_FLD(attr, event_filter))
+		reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
+
+	if (ATTR_CFG_GET_FLD(attr, min_latency))
+		reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
+
+	return reg;
+}
+
+static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	return ATTR_CFG_GET_FLD(attr, event_filter);
+}
+
+static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	return ATTR_CFG_GET_FLD(attr, min_latency)
+	       << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
+}
+
+static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
+{
+	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+	u64 head = PERF_IDX2OFF(handle->head, buf);
+
+	memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
+	if (!buf->snapshot)
+		perf_aux_output_skip(handle, len);
+}
+
+static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
+{
+	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+	u64 head = PERF_IDX2OFF(handle->head, buf);
+	u64 limit = buf->nr_pages * PAGE_SIZE;
+
+	/*
+	 * The trace format isn't parseable in reverse, so clamp
+	 * the limit to half of the buffer size in snapshot mode
+	 * so that the worst case is half a buffer of records, as
+	 * opposed to a single record.
+	 */
+	if (head < limit >> 1)
+		limit >>= 1;
+
+	/*
+	 * If we're within max_record_sz of the limit, we must
+	 * pad, move the head index and recompute the limit.
+	 */
+	if (limit - head < spe_pmu->max_record_sz) {
+		arm_spe_pmu_pad_buf(handle, limit - head);
+		handle->head = PERF_IDX2OFF(limit, buf);
+		limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
+	}
+
+	return limit;
+}
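+
+/*
+ * For example, with an 8-page (32KiB) buffer: while head lies in the first
+ * half, the limit is clamped to 16KiB; once head passes 16KiB, records may
+ * be written up to the end of the buffer.
+ */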
+
+static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+	u64 limit = bufsize;
+	u64 head, tail, wakeup;
+
+	/*
+	 * The head can be misaligned for two reasons:
+	 *
+	 * 1. The hardware left PMBPTR pointing to the first byte after
+	 *    a record when generating a buffer management event.
+	 *
+	 * 2. We used perf_aux_output_skip to consume handle->size bytes
+	 *    and CIRC_SPACE was used to compute the size, which always
+	 *    leaves one entry free.
+	 *
+	 * Deal with this by padding to the next alignment boundary and
+	 * moving the head index. If we run out of buffer space, we'll
+	 * reduce handle->size to zero and end up reporting truncation.
+	 */
+	head = PERF_IDX2OFF(handle->head, buf);
+	if (!IS_ALIGNED(head, spe_pmu->align)) {
+		unsigned long delta = roundup(head, spe_pmu->align) - head;
+
+		delta = min(delta, handle->size);
+		arm_spe_pmu_pad_buf(handle, delta);
+		head = PERF_IDX2OFF(handle->head, buf);
+	}
+
+	/* If we've run out of free space, then nothing more to do */
+	if (!handle->size)
+		goto no_space;
+
+	/* Compute the tail and wakeup indices now that we've aligned head */
+	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+	wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+	/*
+	 * Avoid clobbering unconsumed data. We know we have space, so
+	 * if we see head == tail we know that the buffer is empty. If
+	 * head > tail, then there's nothing to clobber prior to
+	 * wrapping.
+	 */
+	if (head < tail)
+		limit = round_down(tail, PAGE_SIZE);
+
+	/*
+	 * Wakeup may be arbitrarily far into the future. If it's not in
+	 * the current generation, either we'll wrap before hitting it,
+	 * or it's in the past and has been handled already.
+	 *
+	 * If there's a wakeup before we wrap, arrange to be woken up by
+	 * the page boundary following it. Keep the tail boundary if
+	 * that's lower.
+	 */
+	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+		limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+	if (limit > head)
+		return limit;
+
+	arm_spe_pmu_pad_buf(handle, handle->size);
+no_space:
+	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+	perf_aux_output_end(handle, 0);
+	return 0;
+}
+
+static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+	u64 limit = __arm_spe_pmu_next_off(handle);
+	u64 head = PERF_IDX2OFF(handle->head, buf);
+
+	/*
+	 * If the head has come too close to the end of the buffer,
+	 * then pad to the end and recompute the limit.
+	 */
+	if (limit && (limit - head < spe_pmu->max_record_sz)) {
+		arm_spe_pmu_pad_buf(handle, limit - head);
+		limit = __arm_spe_pmu_next_off(handle);
+	}
+
+	return limit;
+}
+
+static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
+					  struct perf_event *event)
+{
+	u64 base, limit;
+	struct arm_spe_pmu_buf *buf;
+
+	/* Start a new aux session */
+	buf = perf_aux_output_begin(handle, event);
+	if (!buf) {
+		event->hw.state |= PERF_HES_STOPPED;
+		/*
+		 * We still need to clear the limit pointer, since the
+		 * profiler might only be disabled by virtue of a fault.
+		 */
+		limit = 0;
+		goto out_write_limit;
+	}
+
+	limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
+			      : arm_spe_pmu_next_off(handle);
+	if (limit)
+		limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
+
+	limit += (u64)buf->base;
+	base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
+	write_sysreg_s(base, SYS_PMBPTR_EL1);
+
+out_write_limit:
+	write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
+}
+
+static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
+{
+	struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+	u64 offset, size;
+
+	offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
+	size = offset - PERF_IDX2OFF(handle->head, buf);
+
+	if (buf->snapshot)
+		handle->head = offset;
+
+	perf_aux_output_end(handle, size);
+}
+
+static void arm_spe_pmu_disable_and_drain_local(void)
+{
+	/* Disable profiling at EL0 and EL1 */
+	write_sysreg_s(0, SYS_PMSCR_EL1);
+	isb();
+
+	/* Drain any buffered data */
+	psb_csync();
+	dsb(nsh);
+
+	/* Disable the profiling buffer */
+	write_sysreg_s(0, SYS_PMBLIMITR_EL1);
+	isb();
+}
+
+/* IRQ handling */
+static enum arm_spe_pmu_buf_fault_action
+arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
+{
+	const char *err_str;
+	u64 pmbsr;
+	enum arm_spe_pmu_buf_fault_action ret;
+
+	/*
+	 * Ensure new profiling data is visible to the CPU and any external
+	 * aborts have been resolved.
+	 */
+	psb_csync();
+	dsb(nsh);
+
+	/* Ensure hardware updates to PMBPTR_EL1 are visible */
+	isb();
+
+	/* Service required? */
+	pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
+	if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
+		return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
+
+	/*
+	 * If we've lost data, disable profiling and also set the PARTIAL
+	 * flag to indicate that the last record is corrupted.
+	 */
+	if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
+		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
+					     PERF_AUX_FLAG_PARTIAL);
+
+	/* Report collisions to userspace so that it can up the period */
+	if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
+		perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
+
+	/* We only expect buffer management events */
+	switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
+	case SYS_PMBSR_EL1_EC_BUF:
+		/* Handled below */
+		break;
+	case SYS_PMBSR_EL1_EC_FAULT_S1:
+	case SYS_PMBSR_EL1_EC_FAULT_S2:
+		err_str = "Unexpected buffer fault";
+		goto out_err;
+	default:
+		err_str = "Unknown error code";
+		goto out_err;
+	}
+
+	/* Buffer management event */
+	switch (pmbsr &
+		(SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
+	case SYS_PMBSR_EL1_BUF_BSC_FULL:
+		ret = SPE_PMU_BUF_FAULT_ACT_OK;
+		goto out_stop;
+	default:
+		err_str = "Unknown buffer status code";
+	}
+
+out_err:
+	pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
+			   err_str, smp_processor_id(), pmbsr,
+			   read_sysreg_s(SYS_PMBPTR_EL1),
+			   read_sysreg_s(SYS_PMBLIMITR_EL1));
+	ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
+
+out_stop:
+	arm_spe_perf_aux_output_end(handle);
+	return ret;
+}
+
+static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
+{
+	struct perf_output_handle *handle = dev;
+	struct perf_event *event = handle->event;
+	enum arm_spe_pmu_buf_fault_action act;
+
+	if (!perf_get_aux(handle))
+		return IRQ_NONE;
+
+	act = arm_spe_pmu_buf_get_fault_act(handle);
+	if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+		return IRQ_NONE;
+
+	/*
+	 * Ensure perf callbacks have completed, which may disable the
+	 * profiling buffer in response to a TRUNCATION flag.
+	 */
+	irq_work_run();
+
+	switch (act) {
+	case SPE_PMU_BUF_FAULT_ACT_FATAL:
+		/*
+		 * If a fatal exception occurred then leaving the profiling
+		 * buffer enabled is a recipe waiting to happen. Since
+		 * fatal faults don't always imply truncation, make sure
+		 * that the profiling buffer is disabled explicitly before
+		 * clearing the syndrome register.
+		 */
+		arm_spe_pmu_disable_and_drain_local();
+		break;
+	case SPE_PMU_BUF_FAULT_ACT_OK:
+		/*
+		 * We handled the fault (the buffer was full), so resume
+		 * profiling as long as we didn't detect truncation.
+		 * PMBPTR might be misaligned, but we'll burn that bridge
+		 * when we get to it.
+		 */
+		if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
+			arm_spe_perf_aux_output_begin(handle, event);
+			isb();
+		}
+		break;
+	case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
+		/* We've seen you before, but GCC has the memory of a sieve. */
+		break;
+	}
+
+	/* The buffer pointers are now sane, so resume profiling. */
+	write_sysreg_s(0, SYS_PMBSR_EL1);
+	return IRQ_HANDLED;
+}
+
+/* Perf callbacks */
+static int arm_spe_pmu_event_init(struct perf_event *event)
+{
+	u64 reg;
+	struct perf_event_attr *attr = &event->attr;
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+
+	/* This is, of course, deeply driver-specific */
+	if (attr->type != event->pmu->type)
+		return -ENOENT;
+
+	if (event->cpu >= 0 &&
+	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
+		return -ENOENT;
+
+	if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
+		return -EOPNOTSUPP;
+
+	if (attr->exclude_idle)
+		return -EOPNOTSUPP;
+
+	/*
+	 * Feedback-directed frequency throttling doesn't work when we
+	 * have a buffer of samples. We'd need to manually count the
+	 * samples in the buffer when it fills up and adjust the event
+	 * count to reflect that. Instead, just force the user to specify
+	 * a sample period.
+	 */
+	if (attr->freq)
+		return -EINVAL;
+
+	reg = arm_spe_event_to_pmsfcr(event);
+	if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
+	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
+		return -EOPNOTSUPP;
+
+	if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
+	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
+		return -EOPNOTSUPP;
+
+	if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
+	    !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
+		return -EOPNOTSUPP;
+
+	reg = arm_spe_event_to_pmscr(event);
+	if (!capable(CAP_SYS_ADMIN) &&
+	    (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
+		    BIT(SYS_PMSCR_EL1_CX_SHIFT) |
+		    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
+		return -EACCES;
+
+	return 0;
+}
+
+static void arm_spe_pmu_start(struct perf_event *event, int flags)
+{
+	u64 reg;
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+	hwc->state = 0;
+	arm_spe_perf_aux_output_begin(handle, event);
+	if (hwc->state)
+		return;
+
+	reg = arm_spe_event_to_pmsfcr(event);
+	write_sysreg_s(reg, SYS_PMSFCR_EL1);
+
+	reg = arm_spe_event_to_pmsevfr(event);
+	write_sysreg_s(reg, SYS_PMSEVFR_EL1);
+
+	reg = arm_spe_event_to_pmslatfr(event);
+	write_sysreg_s(reg, SYS_PMSLATFR_EL1);
+
+	if (flags & PERF_EF_RELOAD) {
+		reg = arm_spe_event_to_pmsirr(event);
+		write_sysreg_s(reg, SYS_PMSIRR_EL1);
+		isb();
+		reg = local64_read(&hwc->period_left);
+		write_sysreg_s(reg, SYS_PMSICR_EL1);
+	}
+
+	reg = arm_spe_event_to_pmscr(event);
+	isb();
+	write_sysreg_s(reg, SYS_PMSCR_EL1);
+}
+
+static void arm_spe_pmu_stop(struct perf_event *event, int flags)
+{
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+	/* If we're already stopped, then nothing to do */
+	if (hwc->state & PERF_HES_STOPPED)
+		return;
+
+	/* Stop all trace generation */
+	arm_spe_pmu_disable_and_drain_local();
+
+	if (flags & PERF_EF_UPDATE) {
+		/*
+		 * If there's a fault pending then ensure we contain it
+		 * to this buffer, since we might be on the context-switch
+		 * path.
+		 */
+		if (perf_get_aux(handle)) {
+			enum arm_spe_pmu_buf_fault_action act;
+
+			act = arm_spe_pmu_buf_get_fault_act(handle);
+			if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+				arm_spe_perf_aux_output_end(handle);
+			else
+				write_sysreg_s(0, SYS_PMBSR_EL1);
+		}
+
+		/*
+		 * This may also contain ECOUNT, but nobody else should
+		 * be looking at period_left, since we forbid frequency
+		 * based sampling.
+		 */
+		local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	hwc->state |= PERF_HES_STOPPED;
+}
+
+static int arm_spe_pmu_add(struct perf_event *event, int flags)
+{
+	int ret = 0;
+	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
+
+	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+		return -ENOENT;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_START) {
+		arm_spe_pmu_start(event, PERF_EF_RELOAD);
+		if (hwc->state & PERF_HES_STOPPED)
+			ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void arm_spe_pmu_del(struct perf_event *event, int flags)
+{
+	arm_spe_pmu_stop(event, PERF_EF_UPDATE);
+}
+
+static void arm_spe_pmu_read(struct perf_event *event)
+{
+}
+
+static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
+				   bool snapshot)
+{
+	int i;
+	struct page **pglist;
+	struct arm_spe_pmu_buf *buf;
+
+	/* We need at least two pages for this to work. */
+	if (nr_pages < 2)
+		return NULL;
+
+	/*
+	 * We require an even number of pages for snapshot mode, so that
+	 * we can effectively treat the buffer as consisting of two equal
+	 * parts and give userspace a fighting chance of getting some
+	 * useful data out of it.
+	 */
+	if (!nr_pages || (snapshot && (nr_pages & 1)))
+		return NULL;
+
+	if (cpu == -1)
+		cpu = raw_smp_processor_id();
+
+	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
+	if (!buf)
+		return NULL;
+
+	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+	if (!pglist)
+		goto out_free_buf;
+
+	for (i = 0; i < nr_pages; ++i) {
+		struct page *page = virt_to_page(pages[i]);
+
+		if (PagePrivate(page)) {
+			pr_warn("unexpected high-order page for auxbuf!");
+			goto out_free_pglist;
+		}
+
+		pglist[i] = virt_to_page(pages[i]);
+	}
+
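+	/*
+	 * vmap() the pages into one contiguous kernel VA range, so that the
+	 * SPE base/limit pointers can address the whole buffer linearly.
+	 */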
+	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+	if (!buf->base)
+		goto out_free_pglist;
+
+	buf->nr_pages	= nr_pages;
+	buf->snapshot	= snapshot;
+
+	kfree(pglist);
+	return buf;
+
+out_free_pglist:
+	kfree(pglist);
+out_free_buf:
+	kfree(buf);
+	return NULL;
+}
+
+static void arm_spe_pmu_free_aux(void *aux)
+{
+	struct arm_spe_pmu_buf *buf = aux;
+
+	vunmap(buf->base);
+	kfree(buf);
+}
+
+/* Initialisation and teardown functions */
+static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
+{
+	static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+	int idx;
+	char *name;
+	struct device *dev = &spe_pmu->pdev->dev;
+
+	spe_pmu->pmu = (struct pmu) {
+		.capabilities	= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+		.attr_groups	= arm_spe_pmu_attr_groups,
+		/*
+		 * We hitch a ride on the software context here, so that
+		 * we can support per-task profiling (which is not possible
+		 * with the invalid context as it doesn't get sched callbacks).
+		 * This requires that userspace either uses a dummy event for
+		 * perf_event_open, since the aux buffer is not setup until
+		 * a subsequent mmap, or creates the profiling event in a
+		 * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
+		 * once the buffer has been created.
+		 */
+		.task_ctx_nr	= perf_sw_context,
+		.event_init	= arm_spe_pmu_event_init,
+		.add		= arm_spe_pmu_add,
+		.del		= arm_spe_pmu_del,
+		.start		= arm_spe_pmu_start,
+		.stop		= arm_spe_pmu_stop,
+		.read		= arm_spe_pmu_read,
+		.setup_aux	= arm_spe_pmu_setup_aux,
+		.free_aux	= arm_spe_pmu_free_aux,
+	};
+
+	idx = atomic_inc_return(&pmu_idx);
+	name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+	return perf_pmu_register(&spe_pmu->pmu, name, -1);
+}
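+
+/*
+ * A minimal userspace sketch of the sequence described in the comment above
+ * (hypothetical sizes; the PMU name "arm_spe_0" and the page counts are
+ * assumptions, not part of this driver):
+ *
+ *	struct perf_event_attr pe = { .size = sizeof(pe), .disabled = 1 };
+ *	pe.type = ...;		// read from .../devices/arm_spe_0/type
+ *	pe.sample_period = 1024;
+ *	int fd = syscall(SYS_perf_event_open, &pe, 0, -1, -1, 0);
+ *	void *rb = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
+ *			MAP_SHARED, fd, 0);		// 1 + 2^n pages
+ *	struct perf_event_mmap_page *mp = rb;
+ *	mp->aux_offset = 3 * 4096;
+ *	mp->aux_size = 16 * 4096;			// power-of-2 pages
+ *	void *aux = mmap(NULL, mp->aux_size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, mp->aux_offset);
+ *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+ */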
+
+static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
+{
+	perf_pmu_unregister(&spe_pmu->pmu);
+}
+
+static void __arm_spe_pmu_dev_probe(void *info)
+{
+	int fld;
+	u64 reg;
+	struct arm_spe_pmu *spe_pmu = info;
+	struct device *dev = &spe_pmu->pdev->dev;
+
+	fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
+						   ID_AA64DFR0_PMSVER_SHIFT);
+	if (!fld) {
+		dev_err(dev,
+			"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
+			fld, smp_processor_id());
+		return;
+	}
+
+	/* Read PMBIDR first to determine whether or not we have access */
+	reg = read_sysreg_s(SYS_PMBIDR_EL1);
+	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
+		dev_err(dev,
+			"profiling buffer owned by higher exception level\n");
+		return;
+	}
+
+	/* Minimum alignment. If it's out-of-range, then fail the probe */
+	fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
+	spe_pmu->align = 1 << fld;
+	if (spe_pmu->align > SZ_2K) {
+		dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
+			fld, smp_processor_id());
+		return;
+	}
+
+	/* It's now safe to read PMSIDR and figure out what we've got */
+	reg = read_sysreg_s(SYS_PMSIDR_EL1);
+	if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
+
+	if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
+
+	if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
+
+	if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
+
+	if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_LDS;
+
+	if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
+		spe_pmu->features |= SPE_PMU_FEAT_ERND;
+
+	/* This field has a spaced out encoding, so just use a look-up */
+	fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
+	switch (fld) {
+	case 0:
+		spe_pmu->min_period = 256;
+		break;
+	case 2:
+		spe_pmu->min_period = 512;
+		break;
+	case 3:
+		spe_pmu->min_period = 768;
+		break;
+	case 4:
+		spe_pmu->min_period = 1024;
+		break;
+	case 5:
+		spe_pmu->min_period = 1536;
+		break;
+	case 6:
+		spe_pmu->min_period = 2048;
+		break;
+	case 7:
+		spe_pmu->min_period = 3072;
+		break;
+	default:
+		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
+			 fld);
+		/* Fallthrough */
+	case 8:
+		spe_pmu->min_period = 4096;
+	}
+
+	/* Maximum record size. If it's out-of-range, then fail the probe */
+	fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
+	spe_pmu->max_record_sz = 1 << fld;
+	if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
+		dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
+			fld, smp_processor_id());
+		return;
+	}
+
+	fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
+	switch (fld) {
+	default:
+		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
+			 fld);
+		/* Fallthrough */
+	case 2:
+		spe_pmu->counter_sz = 12;
+	}
+
+	dev_info(dev,
+		 "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
+		 cpumask_pr_args(&spe_pmu->supported_cpus),
+		 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
+
+	spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
+	return;
+}
+
+static void __arm_spe_pmu_reset_local(void)
+{
+	/*
+	 * This is probably overkill, as we have no idea where we're
+	 * draining any buffered data to...
+	 */
+	arm_spe_pmu_disable_and_drain_local();
+
+	/* Reset the buffer base pointer */
+	write_sysreg_s(0, SYS_PMBPTR_EL1);
+	isb();
+
+	/* Clear any pending management interrupts */
+	write_sysreg_s(0, SYS_PMBSR_EL1);
+	isb();
+}
+
+static void __arm_spe_pmu_setup_one(void *info)
+{
+	struct arm_spe_pmu *spe_pmu = info;
+
+	__arm_spe_pmu_reset_local();
+	enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
+}
+
+static void __arm_spe_pmu_stop_one(void *info)
+{
+	struct arm_spe_pmu *spe_pmu = info;
+
+	disable_percpu_irq(spe_pmu->irq);
+	__arm_spe_pmu_reset_local();
+}
+
+static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_spe_pmu *spe_pmu;
+
+	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+		return 0;
+
+	__arm_spe_pmu_setup_one(spe_pmu);
+	return 0;
+}
+
+static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_spe_pmu *spe_pmu;
+
+	spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+		return 0;
+
+	__arm_spe_pmu_stop_one(spe_pmu);
+	return 0;
+}
+
+static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
+{
+	int ret;
+	cpumask_t *mask = &spe_pmu->supported_cpus;
+
+	/* Make sure we probe the hardware on a relevant CPU */
+	ret = smp_call_function_any(mask,  __arm_spe_pmu_dev_probe, spe_pmu, 1);
+	if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
+		return -ENXIO;
+
+	/* Request our PPIs (note that the IRQ is still disabled) */
+	ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
+				 spe_pmu->handle);
+	if (ret)
+		return ret;
+
+	/*
+	 * Register our hotplug notifier now so we don't miss any events.
+	 * This will enable the IRQ for any supported CPUs that are already
+	 * up.
+	 */
+	ret = cpuhp_state_add_instance(arm_spe_pmu_online,
+				       &spe_pmu->hotplug_node);
+	if (ret)
+		free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+
+	return ret;
+}
+
+static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
+{
+	cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
+	free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+}
+
+/* Driver and device probing */
+static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
+{
+	struct platform_device *pdev = spe_pmu->pdev;
+	int irq = platform_get_irq(pdev, 0);
+
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
+		return -ENXIO;
+	}
+
+	if (!irq_is_percpu(irq)) {
+		dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
+		return -EINVAL;
+	}
+
+	if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
+		dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
+		return -EINVAL;
+	}
+
+	spe_pmu->irq = irq;
+	return 0;
+}
+
+static const struct of_device_id arm_spe_pmu_of_match[] = {
+	{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
+	{ /* Sentinel */ },
+};
+
+static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct arm_spe_pmu *spe_pmu;
+	struct device *dev = &pdev->dev;
+
+	spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
+	if (!spe_pmu) {
+		dev_err(dev, "failed to allocate spe_pmu\n");
+		return -ENOMEM;
+	}
+
+	spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
+	if (!spe_pmu->handle)
+		return -ENOMEM;
+
+	spe_pmu->pdev = pdev;
+	platform_set_drvdata(pdev, spe_pmu);
+
+	ret = arm_spe_pmu_irq_probe(spe_pmu);
+	if (ret)
+		goto out_free_handle;
+
+	ret = arm_spe_pmu_dev_init(spe_pmu);
+	if (ret)
+		goto out_free_handle;
+
+	ret = arm_spe_pmu_perf_init(spe_pmu);
+	if (ret)
+		goto out_teardown_dev;
+
+	return 0;
+
+out_teardown_dev:
+	arm_spe_pmu_dev_teardown(spe_pmu);
+out_free_handle:
+	free_percpu(spe_pmu->handle);
+	return ret;
+}
+
+static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+{
+	struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+	arm_spe_pmu_perf_destroy(spe_pmu);
+	arm_spe_pmu_dev_teardown(spe_pmu);
+	free_percpu(spe_pmu->handle);
+	return 0;
+}
+
+static struct platform_driver arm_spe_pmu_driver = {
+	.driver	= {
+		.name		= DRVNAME,
+		.of_match_table	= of_match_ptr(arm_spe_pmu_of_match),
+	},
+	.probe	= arm_spe_pmu_device_dt_probe,
+	.remove	= arm_spe_pmu_device_remove,
+};
+
+static int __init arm_spe_pmu_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+				      arm_spe_pmu_cpu_startup,
+				      arm_spe_pmu_cpu_teardown);
+	if (ret < 0)
+		return ret;
+	arm_spe_pmu_online = ret;
+
+	ret = platform_driver_register(&arm_spe_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(arm_spe_pmu_online);
+
+	return ret;
+}
+
+static void __exit arm_spe_pmu_exit(void)
+{
+	platform_driver_unregister(&arm_spe_pmu_driver);
+	cpuhp_remove_multi_state(arm_spe_pmu_online);
+}
+
+module_init(arm_spe_pmu_init);
+module_exit(arm_spe_pmu_exit);
+
+MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");

+ 1 - 0
drivers/perf/hisilicon/Makefile

@@ -0,0 +1 @@
+obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o

+ 463 - 0
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c

@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC DDRC uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *         Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* DDRC register definition */
+#define DDRC_PERF_CTRL		0x010
+#define DDRC_FLUX_WR		0x380
+#define DDRC_FLUX_RD		0x384
+#define DDRC_FLUX_WCMD          0x388
+#define DDRC_FLUX_RCMD          0x38c
+#define DDRC_PRE_CMD            0x3c0
+#define DDRC_ACT_CMD            0x3c4
+#define DDRC_BNK_CHG            0x3c8
+#define DDRC_RNK_CHG            0x3cc
+#define DDRC_EVENT_CTRL         0x6C0
+#define DDRC_INT_MASK		0x6c8
+#define DDRC_INT_STATUS		0x6cc
+#define DDRC_INT_CLEAR		0x6d0
+
+/* DDRC has 8 counters */
+#define DDRC_NR_COUNTERS	0x8
+#define DDRC_PERF_CTRL_EN	0x2
+
+/*
+ * For the DDRC PMU, there are eight events, each mapped by hardware to a
+ * fixed-purpose counter whose register offset is not consistent. There is
+ * therefore no event-type register to write, and we assume that the event
+ * code (0 to 7) is equal to the counter index in the PMU driver.
+ */
+#define GET_DDRC_EVENTID(hwc)	(hwc->config_base & 0x7)
+
+static const u32 ddrc_reg_off[] = {
+	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
+	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+};
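+
+/*
+ * For example, an event code of 0x3 (flux_rcmd) selects both counter index 3
+ * and its fixed statistics register, DDRC_FLUX_RCMD (0x38c).
+ */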
+
+/*
+ * Select the counter register offset using the counter index.
+ * In DDRC there are no programmable counters; the count is read
+ * from the statistics counter register itself.
+ */
+static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+{
+	return ddrc_reg_off[cntr_idx];
+}
+
+static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
+				      struct hw_perf_event *hwc)
+{
+	/* Use event code as counter index */
+	u32 idx = GET_DDRC_EVENTID(hwc);
+
+	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return 0;
+	}
+
+	return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+					struct hw_perf_event *hwc, u64 val)
+{
+	u32 idx = GET_DDRC_EVENTID(hwc);
+
+	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	writel((u32)val,
+	       ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+/*
+ * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
+ * so there is no need to write event type.
+ */
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
+				       u32 type)
+{
+}
+
+static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+	u32 val;
+
+	/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
+	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+	val |= DDRC_PERF_CTRL_EN;
+	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+	u32 val;
+
+	/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
+	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+	val &= ~DDRC_PERF_CTRL_EN;
+	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
+					 struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Set counter index(event code) in DDRC_EVENT_CTRL register */
+	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+	val |= (1 << GET_DDRC_EVENTID(hwc));
+	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
+					  struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Clear counter index(event code) in DDRC_EVENT_CTRL register */
+	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+	val &= ~(1 << GET_DDRC_EVENTID(hwc));
+	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+{
+	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
+	unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
+	struct hw_perf_event *hwc = &event->hw;
+	/* For DDRC PMU, we use event code as counter index */
+	int idx = GET_DDRC_EVENTID(hwc);
+
+	if (test_bit(idx, used_mask))
+		return -EAGAIN;
+
+	set_bit(idx, used_mask);
+
+	return idx;
+}
+
+static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+					     struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 0 to enable interrupt */
+	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+	val &= ~(1 << GET_DDRC_EVENTID(hwc));
+	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+					      struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 1 to mask interrupt */
+	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+	val |= (1 << GET_DDRC_EVENTID(hwc));
+	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+{
+	struct hisi_pmu *ddrc_pmu = dev_id;
+	struct perf_event *event;
+	unsigned long overflown;
+	int idx;
+
+	/* Read the DDRC_INT_STATUS register */
+	overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
+	if (!overflown)
+		return IRQ_NONE;
+
+	/*
+	 * Find the counter index which overflowed if the bit was set
+	 * and handle it
+	 */
+	for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
+		/* Write 1 to clear the IRQ status flag */
+		writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+
+		/* Get the corresponding event struct */
+		event = ddrc_pmu->pmu_events.hw_events[idx];
+		if (!event)
+			continue;
+
+		hisi_uncore_pmu_event_update(event);
+		hisi_uncore_pmu_set_event_period(event);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
+				  struct platform_device *pdev)
+{
+	int irq, ret;
+
+	/* Read and init IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), ddrc_pmu);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"Fail to request IRQ:%d ret:%d\n", irq, ret);
+		return ret;
+	}
+
+	ddrc_pmu->irq = irq;
+
+	return 0;
+}
+
+static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
+	{ "HISI0233", },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
+
+static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
+				   struct hisi_pmu *ddrc_pmu)
+{
+	struct resource *res;
+
+	/*
+	 * Use the SCCL_ID and DDRC channel ID to identify the
+	 * DDRC PMU, where SCCL_ID is in MPIDR[aff2].
+	 */
+	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
+				     &ddrc_pmu->index_id)) {
+		dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
+		return -EINVAL;
+	}
+
+	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+				     &ddrc_pmu->sccl_id)) {
+		dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
+		return -EINVAL;
+	}
+	/* DDRC PMUs are shared at SCCL level, not tied to a particular CCL */
+	ddrc_pmu->ccl_id = -1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ddrc_pmu->base)) {
+		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
+		return PTR_ERR(ddrc_pmu->base);
+	}
+
+	return 0;
+}
+
+static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
+	NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_format_group = {
+	.name = "format",
+	.attrs = hisi_ddrc_pmu_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+	HISI_PMU_EVENT_ATTR(flux_wr,		0x00),
+	HISI_PMU_EVENT_ATTR(flux_rd,		0x01),
+	HISI_PMU_EVENT_ATTR(flux_wcmd,		0x02),
+	HISI_PMU_EVENT_ATTR(flux_rcmd,		0x03),
+	HISI_PMU_EVENT_ATTR(pre_cmd,		0x04),
+	HISI_PMU_EVENT_ATTR(act_cmd,		0x05),
+	HISI_PMU_EVENT_ATTR(rnk_chg,		0x06),
+	HISI_PMU_EVENT_ATTR(rw_chg,		0x07),
+	NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_events_group = {
+	.name = "events",
+	.attrs = hisi_ddrc_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
+	.attrs = hisi_ddrc_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
+	&hisi_ddrc_pmu_format_group,
+	&hisi_ddrc_pmu_events_group,
+	&hisi_ddrc_pmu_cpumask_attr_group,
+	NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+	.write_evtype           = hisi_ddrc_pmu_write_evtype,
+	.get_event_idx		= hisi_ddrc_pmu_get_event_idx,
+	.start_counters		= hisi_ddrc_pmu_start_counters,
+	.stop_counters		= hisi_ddrc_pmu_stop_counters,
+	.enable_counter		= hisi_ddrc_pmu_enable_counter,
+	.disable_counter	= hisi_ddrc_pmu_disable_counter,
+	.enable_counter_int	= hisi_ddrc_pmu_enable_counter_int,
+	.disable_counter_int	= hisi_ddrc_pmu_disable_counter_int,
+	.write_counter		= hisi_ddrc_pmu_write_counter,
+	.read_counter		= hisi_ddrc_pmu_read_counter,
+};
+
+static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
+				   struct hisi_pmu *ddrc_pmu)
+{
+	int ret;
+
+	ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
+	if (ret)
+		return ret;
+
+	ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+	if (ret)
+		return ret;
+
+	ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
+	ddrc_pmu->counter_bits = 32;
+	ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
+	ddrc_pmu->dev = &pdev->dev;
+	ddrc_pmu->on_cpu = -1;
+	ddrc_pmu->check_event = 7;
+
+	return 0;
+}
+
+static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
+{
+	struct hisi_pmu *ddrc_pmu;
+	char *name;
+	int ret;
+
+	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
+	if (!ddrc_pmu)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ddrc_pmu);
+
+	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+				       &ddrc_pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
+		return ret;
+	}
+
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
+			      ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+	ddrc_pmu->pmu = (struct pmu) {
+		.name		= name,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= hisi_uncore_pmu_event_init,
+		.pmu_enable	= hisi_uncore_pmu_enable,
+		.pmu_disable	= hisi_uncore_pmu_disable,
+		.add		= hisi_uncore_pmu_add,
+		.del		= hisi_uncore_pmu_del,
+		.start		= hisi_uncore_pmu_start,
+		.stop		= hisi_uncore_pmu_stop,
+		.read		= hisi_uncore_pmu_read,
+		.attr_groups	= hisi_ddrc_pmu_attr_groups,
+	};
+
+	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
+	if (ret) {
+		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
+		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+					    &ddrc_pmu->node);
+	}
+
+	return ret;
+}
+
+static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+{
+	struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&ddrc_pmu->pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+				    &ddrc_pmu->node);
+
+	return 0;
+}
+
+static struct platform_driver hisi_ddrc_pmu_driver = {
+	.driver = {
+		.name = "hisi_ddrc_pmu",
+		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+	},
+	.probe = hisi_ddrc_pmu_probe,
+	.remove = hisi_ddrc_pmu_remove,
+};
+
+static int __init hisi_ddrc_pmu_module_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
+				      hisi_uncore_pmu_online_cpu,
+				      hisi_uncore_pmu_offline_cpu);
+	if (ret) {
+		pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+
+	return ret;
+}
+module_init(hisi_ddrc_pmu_module_init);
+
+static void __exit hisi_ddrc_pmu_module_exit(void)
+{
+	platform_driver_unregister(&hisi_ddrc_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+}
+module_exit(hisi_ddrc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");

+ 473 - 0
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c

@@ -0,0 +1,473 @@
+/*
+ * HiSilicon SoC HHA uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *         Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* HHA register definition */
+#define HHA_INT_MASK		0x0804
+#define HHA_INT_STATUS		0x0808
+#define HHA_INT_CLEAR		0x080C
+#define HHA_PERF_CTRL		0x1E00
+#define HHA_EVENT_CTRL		0x1E04
+#define HHA_EVENT_TYPE0		0x1E80
+/*
+ * Each counter is 48 bits wide; bits [63:48] are reserved and are
+ * Read-As-Zero and Writes-Ignored.
+ */
+#define HHA_CNT0_LOWER		0x1F00
+
+/* HHA has 16 counters */
+#define HHA_NR_COUNTERS		0x10
+
+#define HHA_PERF_CTRL_EN	0x1
+#define HHA_EVTYPE_NONE		0xff
+
+/*
+ * Select the counter register offset from the counter index;
+ * each counter is 48 bits wide.
+ */
+static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
+{
+	return (HHA_CNT0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
+				     struct hw_perf_event *hwc)
+{
+	u32 idx = hwc->idx;
+
+	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return 0;
+	}
+
+	/* Read 64 bits; as in L3C, the top 16 bits are RAZ */
+	return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
+				       struct hw_perf_event *hwc, u64 val)
+{
+	u32 idx = hwc->idx;
+
+	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	/* Write 64 bits; as in L3C, the top 16 bits are WI */
+	writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+				      u32 type)
+{
+	u32 reg, reg_idx, shift, val;
+
+	/*
+	 * Select the appropriate event select register (HHA_EVENT_TYPEx).
+	 * There are 4 event select registers for the 16 hardware counters.
+	 * The event code is 8 bits wide: the first 4 hardware counters use
+	 * HHA_EVENT_TYPE0, the next 4 use HHA_EVENT_TYPE1, and so on.
+	 */
+	reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
+	reg_idx = idx % 4;
+	shift = 8 * reg_idx;
+
+	/* Write event code to HHA_EVENT_TYPEx register */
+	val = readl(hha_pmu->base + reg);
+	val &= ~(HHA_EVTYPE_NONE << shift);
+	val |= (type << shift);
+	writel(val, hha_pmu->base + reg);
+}
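+
+/*
+ * Example: for idx = 9 and event code 0x33 (tx_snp_num), reg is
+ * HHA_EVENT_TYPE0 + 4 * (9 / 4) = 0x1E88 and shift is 8 * (9 % 4) = 8,
+ * so the code is written to bits [15:8] of HHA_EVENT_TYPE2.
+ */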
+
+static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
+{
+	u32 val;
+
+	/*
+	 * Set perf_enable bit in HHA_PERF_CTRL to start event
+	 * counting for all enabled counters.
+	 */
+	val = readl(hha_pmu->base + HHA_PERF_CTRL);
+	val |= HHA_PERF_CTRL_EN;
+	writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
+{
+	u32 val;
+
+	/*
+	 * Clear perf_enable bit in HHA_PERF_CTRL to stop event
+	 * counting for all enabled counters.
+	 */
+	val = readl(hha_pmu->base + HHA_PERF_CTRL);
+	val &= ~(HHA_PERF_CTRL_EN);
+	writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
+					struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Enable counter index in HHA_EVENT_CTRL register */
+	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+	val |= (1 << hwc->idx);
+	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
+					 struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Clear counter index in HHA_EVENT_CTRL register */
+	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+	val &= ~(1 << hwc->idx);
+	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
+					    struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 0 to enable interrupt */
+	val = readl(hha_pmu->base + HHA_INT_MASK);
+	val &= ~(1 << hwc->idx);
+	writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
+					     struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 1 to mask interrupt */
+	val = readl(hha_pmu->base + HHA_INT_MASK);
+	val |= (1 << hwc->idx);
+	writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+{
+	struct hisi_pmu *hha_pmu = dev_id;
+	struct perf_event *event;
+	unsigned long overflown;
+	int idx;
+
+	/* Read HHA_INT_STATUS register */
+	overflown = readl(hha_pmu->base + HHA_INT_STATUS);
+	if (!overflown)
+		return IRQ_NONE;
+
+	/*
+	 * Find the counter index which overflowed if the bit was set
+	 * and handle it
+	 */
+	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
+		/* Write 1 to clear the IRQ status flag */
+		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
+
+		/* Get the corresponding event struct */
+		event = hha_pmu->pmu_events.hw_events[idx];
+		if (!event)
+			continue;
+
+		hisi_uncore_pmu_event_update(event);
+		hisi_uncore_pmu_set_event_period(event);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
+				 struct platform_device *pdev)
+{
+	int irq, ret;
+
+	/* Read and init IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), hha_pmu);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"Failed to request IRQ %d: ret = %d\n", irq, ret);
+		return ret;
+	}
+
+	hha_pmu->irq = irq;
+
+	return 0;
+}
+
+static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
+	{ "HISI0243", },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
+
+static int hisi_hha_pmu_init_data(struct platform_device *pdev,
+				  struct hisi_pmu *hha_pmu)
+{
+	unsigned long long id;
+	struct resource *res;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+				       "_UID", NULL, &id);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+
+	hha_pmu->index_id = id;
+
+	/*
+	 * Use SCCL_ID and UID to identify the HHA PMU, where
+	 * SCCL_ID is in MPIDR[aff2].
+	 */
+	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+				     &hha_pmu->sccl_id)) {
+		dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
+		return -EINVAL;
+	}
+	/* HHA PMUs are associated with the whole SCCL, not a single CCL */
+	hha_pmu->ccl_id = -1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hha_pmu->base)) {
+		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
+		return PTR_ERR(hha_pmu->base);
+	}
+
+	return 0;
+}
+
+static struct attribute *hisi_hha_pmu_format_attr[] = {
+	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+	NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_format_group = {
+	.name = "format",
+	.attrs = hisi_hha_pmu_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_events_attr[] = {
+	HISI_PMU_EVENT_ATTR(rx_ops_num,		0x00),
+	HISI_PMU_EVENT_ATTR(rx_outer,		0x01),
+	HISI_PMU_EVENT_ATTR(rx_sccl,		0x02),
+	HISI_PMU_EVENT_ATTR(rx_ccix,		0x03),
+	HISI_PMU_EVENT_ATTR(rx_wbi,		0x04),
+	HISI_PMU_EVENT_ATTR(rx_wbip,		0x05),
+	HISI_PMU_EVENT_ATTR(rx_wtistash,	0x11),
+	HISI_PMU_EVENT_ATTR(rd_ddr_64b,		0x1c),
+	HISI_PMU_EVENT_ATTR(wr_ddr_64b,		0x1d),
+	HISI_PMU_EVENT_ATTR(rd_ddr_128b,	0x1e),
+	HISI_PMU_EVENT_ATTR(wr_ddr_128b,	0x1f),
+	HISI_PMU_EVENT_ATTR(spill_num,		0x20),
+	HISI_PMU_EVENT_ATTR(spill_success,	0x21),
+	HISI_PMU_EVENT_ATTR(bi_num,		0x23),
+	HISI_PMU_EVENT_ATTR(mediated_num,	0x32),
+	HISI_PMU_EVENT_ATTR(tx_snp_num,		0x33),
+	HISI_PMU_EVENT_ATTR(tx_snp_outer,	0x34),
+	HISI_PMU_EVENT_ATTR(tx_snp_ccix,	0x35),
+	HISI_PMU_EVENT_ATTR(rx_snprspdata,	0x38),
+	HISI_PMU_EVENT_ATTR(rx_snprsp_outer,	0x3c),
+	HISI_PMU_EVENT_ATTR(sdir-lookup,	0x40),
+	HISI_PMU_EVENT_ATTR(edir-lookup,	0x41),
+	HISI_PMU_EVENT_ATTR(sdir-hit,		0x42),
+	HISI_PMU_EVENT_ATTR(edir-hit,		0x43),
+	HISI_PMU_EVENT_ATTR(sdir-home-migrate,	0x4c),
+	HISI_PMU_EVENT_ATTR(edir-home-migrate,  0x4d),
+	NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_events_group = {
+	.name = "events",
+	.attrs = hisi_hha_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
+	.attrs = hisi_hha_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
+	&hisi_hha_pmu_format_group,
+	&hisi_hha_pmu_events_group,
+	&hisi_hha_pmu_cpumask_attr_group,
+	NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
+	.write_evtype		= hisi_hha_pmu_write_evtype,
+	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
+	.start_counters		= hisi_hha_pmu_start_counters,
+	.stop_counters		= hisi_hha_pmu_stop_counters,
+	.enable_counter		= hisi_hha_pmu_enable_counter,
+	.disable_counter	= hisi_hha_pmu_disable_counter,
+	.enable_counter_int	= hisi_hha_pmu_enable_counter_int,
+	.disable_counter_int	= hisi_hha_pmu_disable_counter_int,
+	.write_counter		= hisi_hha_pmu_write_counter,
+	.read_counter		= hisi_hha_pmu_read_counter,
+};
+
+static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
+				  struct hisi_pmu *hha_pmu)
+{
+	int ret;
+
+	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
+	if (ret)
+		return ret;
+
+	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+	if (ret)
+		return ret;
+
+	hha_pmu->num_counters = HHA_NR_COUNTERS;
+	hha_pmu->counter_bits = 48;
+	hha_pmu->ops = &hisi_uncore_hha_ops;
+	hha_pmu->dev = &pdev->dev;
+	hha_pmu->on_cpu = -1;
+	hha_pmu->check_event = 0x65;
+
+	return 0;
+}
+
+static int hisi_hha_pmu_probe(struct platform_device *pdev)
+{
+	struct hisi_pmu *hha_pmu;
+	char *name;
+	int ret;
+
+	hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
+	if (!hha_pmu)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, hha_pmu);
+
+	ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+				       &hha_pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+		return ret;
+	}
+
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
+			      hha_pmu->sccl_id, hha_pmu->index_id);
+	hha_pmu->pmu = (struct pmu) {
+		.name		= name,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= hisi_uncore_pmu_event_init,
+		.pmu_enable	= hisi_uncore_pmu_enable,
+		.pmu_disable	= hisi_uncore_pmu_disable,
+		.add		= hisi_uncore_pmu_add,
+		.del		= hisi_uncore_pmu_del,
+		.start		= hisi_uncore_pmu_start,
+		.stop		= hisi_uncore_pmu_stop,
+		.read		= hisi_uncore_pmu_read,
+		.attr_groups	= hisi_hha_pmu_attr_groups,
+	};
+
+	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
+	if (ret) {
+		dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
+		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+					    &hha_pmu->node);
+	}
+
+	return ret;
+}
+
+static int hisi_hha_pmu_remove(struct platform_device *pdev)
+{
+	struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&hha_pmu->pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+				    &hha_pmu->node);
+
+	return 0;
+}
+
+static struct platform_driver hisi_hha_pmu_driver = {
+	.driver = {
+		.name = "hisi_hha_pmu",
+		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+	},
+	.probe = hisi_hha_pmu_probe,
+	.remove = hisi_hha_pmu_remove,
+};
+
+static int __init hisi_hha_pmu_module_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+				      "AP_PERF_ARM_HISI_HHA_ONLINE",
+				      hisi_uncore_pmu_online_cpu,
+				      hisi_uncore_pmu_offline_cpu);
+	if (ret) {
+		pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&hisi_hha_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+
+	return ret;
+}
+module_init(hisi_hha_pmu_module_init);
+
+static void __exit hisi_hha_pmu_module_exit(void)
+{
+	platform_driver_unregister(&hisi_hha_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+}
+module_exit(hisi_hha_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");

+ 463 - 0
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c

@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC L3C uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ *         Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* L3C register definition */
+#define L3C_PERF_CTRL		0x0408
+#define L3C_INT_MASK		0x0800
+#define L3C_INT_STATUS		0x0808
+#define L3C_INT_CLEAR		0x080c
+#define L3C_EVENT_CTRL		0x1c00
+#define L3C_EVENT_TYPE0		0x1d00
+/*
+ * Each counter is 48 bits wide; bits [63:48] are reserved and are
+ * Read-As-Zero and Writes-Ignored.
+ */
+#define L3C_CNTR0_LOWER		0x1e00
+
+/* L3C has 8 counters */
+#define L3C_NR_COUNTERS		0x8
+
+#define L3C_PERF_CTRL_EN	0x20000
+#define L3C_EVTYPE_NONE		0xff
+
+/*
+ * Select the counter register offset using the counter index
+ */
+static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
+{
+	return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
+				     struct hw_perf_event *hwc)
+{
+	u32 idx = hwc->idx;
+
+	if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+		dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return 0;
+	}
+
+	/* Read 64 bits; the upper 16 bits are RAZ */
+	return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
+				       struct hw_perf_event *hwc, u64 val)
+{
+	u32 idx = hwc->idx;
+
+	if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+		dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	/* Write 64 bits; the upper 16 bits are WI */
+	writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
+				      u32 type)
+{
+	u32 reg, reg_idx, shift, val;
+
+	/*
+	 * Select the appropriate event select register (L3C_EVENT_TYPE0/1).
+	 * There are 2 event select registers for the 8 hardware counters.
+	 * The event code is 8 bits wide: the first 4 hardware counters use
+	 * L3C_EVENT_TYPE0 and the last 4 use L3C_EVENT_TYPE1.
+	 */
+	reg = L3C_EVENT_TYPE0 + (idx / 4) * 4;
+	reg_idx = idx % 4;
+	shift = 8 * reg_idx;
+
+	/* Write event code to L3C_EVENT_TYPEx Register */
+	val = readl(l3c_pmu->base + reg);
+	val &= ~(L3C_EVTYPE_NONE << shift);
+	val |= (type << shift);
+	writel(val, l3c_pmu->base + reg);
+}
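+
+/*
+ * Example: idx = 6 selects reg = L3C_EVENT_TYPE0 + (6 / 4) * 4 = 0x1d04
+ * (L3C_EVENT_TYPE1) with shift = 8 * (6 % 4) = 16, so the event code
+ * lands in bits [23:16] of L3C_EVENT_TYPE1.
+ */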
+
+static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
+{
+	u32 val;
+
+	/*
+	 * Set perf_enable bit in L3C_PERF_CTRL register to start counting
+	 * for all enabled counters.
+	 */
+	val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+	val |= L3C_PERF_CTRL_EN;
+	writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
+{
+	u32 val;
+
+	/*
+	 * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
+	 * for all enabled counters.
+	 */
+	val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+	val &= ~(L3C_PERF_CTRL_EN);
+	writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
+					struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Enable counter index in L3C_EVENT_CTRL register */
+	val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+	val |= (1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
+					 struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Clear counter index in L3C_EVENT_CTRL register */
+	val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+	val &= ~(1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
+					    struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	val = readl(l3c_pmu->base + L3C_INT_MASK);
+	/* Write 0 to enable interrupt */
+	val &= ~(1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
+					     struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	val = readl(l3c_pmu->base + L3C_INT_MASK);
+	/* Write 1 to mask interrupt */
+	val |= (1 << hwc->idx);
+	writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+{
+	struct hisi_pmu *l3c_pmu = dev_id;
+	struct perf_event *event;
+	unsigned long overflown;
+	int idx;
+
+	/* Read L3C_INT_STATUS register */
+	overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
+	if (!overflown)
+		return IRQ_NONE;
+
+	/*
+	 * Find the counter index which overflowed if the bit was set
+	 * and handle it.
+	 */
+	for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
+		/* Write 1 to clear the IRQ status flag */
+		writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
+
+		/* Get the corresponding event struct */
+		event = l3c_pmu->pmu_events.hw_events[idx];
+		if (!event)
+			continue;
+
+		hisi_uncore_pmu_event_update(event);
+		hisi_uncore_pmu_set_event_period(event);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
+				 struct platform_device *pdev)
+{
+	int irq, ret;
+
+	/* Read and init IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), l3c_pmu);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"Failed to request IRQ %d: ret = %d\n", irq, ret);
+		return ret;
+	}
+
+	l3c_pmu->irq = irq;
+
+	return 0;
+}
+
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+	{ "HISI0213", },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
+static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
+				  struct hisi_pmu *l3c_pmu)
+{
+	unsigned long long id;
+	struct resource *res;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+				       "_UID", NULL, &id);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+
+	l3c_pmu->index_id = id;
+
+	/*
+	 * Use the SCCL_ID and CCL_ID to identify the L3C PMU, where
+	 * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
+	 */
+	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+				     &l3c_pmu->sccl_id)) {
+		dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
+		return -EINVAL;
+	}
+
+	if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
+				     &l3c_pmu->ccl_id)) {
+		dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(l3c_pmu->base)) {
+		dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
+		return PTR_ERR(l3c_pmu->base);
+	}
+
+	return 0;
+}
+
+static struct attribute *hisi_l3c_pmu_format_attr[] = {
+	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_format_group = {
+	.name = "format",
+	.attrs = hisi_l3c_pmu_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_events_attr[] = {
+	HISI_PMU_EVENT_ATTR(rd_cpipe,		0x00),
+	HISI_PMU_EVENT_ATTR(wr_cpipe,		0x01),
+	HISI_PMU_EVENT_ATTR(rd_hit_cpipe,	0x02),
+	HISI_PMU_EVENT_ATTR(wr_hit_cpipe,	0x03),
+	HISI_PMU_EVENT_ATTR(victim_num,		0x04),
+	HISI_PMU_EVENT_ATTR(rd_spipe,		0x20),
+	HISI_PMU_EVENT_ATTR(wr_spipe,		0x21),
+	HISI_PMU_EVENT_ATTR(rd_hit_spipe,	0x22),
+	HISI_PMU_EVENT_ATTR(wr_hit_spipe,	0x23),
+	HISI_PMU_EVENT_ATTR(back_invalid,	0x29),
+	HISI_PMU_EVENT_ATTR(retry_cpu,		0x40),
+	HISI_PMU_EVENT_ATTR(retry_ring,		0x41),
+	HISI_PMU_EVENT_ATTR(prefetch_drop,	0x42),
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_events_group = {
+	.name = "events",
+	.attrs = hisi_l3c_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
+	.attrs = hisi_l3c_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
+	&hisi_l3c_pmu_format_group,
+	&hisi_l3c_pmu_events_group,
+	&hisi_l3c_pmu_cpumask_attr_group,
+	NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
+	.write_evtype		= hisi_l3c_pmu_write_evtype,
+	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
+	.start_counters		= hisi_l3c_pmu_start_counters,
+	.stop_counters		= hisi_l3c_pmu_stop_counters,
+	.enable_counter		= hisi_l3c_pmu_enable_counter,
+	.disable_counter	= hisi_l3c_pmu_disable_counter,
+	.enable_counter_int	= hisi_l3c_pmu_enable_counter_int,
+	.disable_counter_int	= hisi_l3c_pmu_disable_counter_int,
+	.write_counter		= hisi_l3c_pmu_write_counter,
+	.read_counter		= hisi_l3c_pmu_read_counter,
+};
+
+static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
+				  struct hisi_pmu *l3c_pmu)
+{
+	int ret;
+
+	ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
+	if (ret)
+		return ret;
+
+	ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+	if (ret)
+		return ret;
+
+	l3c_pmu->num_counters = L3C_NR_COUNTERS;
+	l3c_pmu->counter_bits = 48;
+	l3c_pmu->ops = &hisi_uncore_l3c_ops;
+	l3c_pmu->dev = &pdev->dev;
+	l3c_pmu->on_cpu = -1;
+	l3c_pmu->check_event = 0x59;
+
+	return 0;
+}
+
+static int hisi_l3c_pmu_probe(struct platform_device *pdev)
+{
+	struct hisi_pmu *l3c_pmu;
+	char *name;
+	int ret;
+
+	l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
+	if (!l3c_pmu)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, l3c_pmu);
+
+	ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				       &l3c_pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+		return ret;
+	}
+
+	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
+			      l3c_pmu->sccl_id, l3c_pmu->index_id);
+	l3c_pmu->pmu = (struct pmu) {
+		.name		= name,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= hisi_uncore_pmu_event_init,
+		.pmu_enable	= hisi_uncore_pmu_enable,
+		.pmu_disable	= hisi_uncore_pmu_disable,
+		.add		= hisi_uncore_pmu_add,
+		.del		= hisi_uncore_pmu_del,
+		.start		= hisi_uncore_pmu_start,
+		.stop		= hisi_uncore_pmu_stop,
+		.read		= hisi_uncore_pmu_read,
+		.attr_groups	= hisi_l3c_pmu_attr_groups,
+	};
+
+	ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
+	if (ret) {
+		dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
+		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+					    &l3c_pmu->node);
+	}
+
+	return ret;
+}
+
+static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+{
+	struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&l3c_pmu->pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				    &l3c_pmu->node);
+
+	return 0;
+}
+
+static struct platform_driver hisi_l3c_pmu_driver = {
+	.driver = {
+		.name = "hisi_l3c_pmu",
+		.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+	},
+	.probe = hisi_l3c_pmu_probe,
+	.remove = hisi_l3c_pmu_remove,
+};
+
+static int __init hisi_l3c_pmu_module_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+				      "AP_PERF_ARM_HISI_L3_ONLINE",
+				      hisi_uncore_pmu_online_cpu,
+				      hisi_uncore_pmu_offline_cpu);
+	if (ret) {
+		pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&hisi_l3c_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+
+	return ret;
+}
+module_init(hisi_l3c_pmu_module_init);
+
+static void __exit hisi_l3c_pmu_module_exit(void)
+{
+	platform_driver_unregister(&hisi_l3c_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+}
+module_exit(hisi_l3c_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");

+ 447 - 0
drivers/perf/hisilicon/hisi_uncore_pmu.c

@@ -0,0 +1,447 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ *         Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include <asm/local64.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
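+/*
+ * e.g. HISI_MAX_PERIOD(32) == 0xffffffff and
+ * HISI_MAX_PERIOD(48) == 0xffffffffffff
+ */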
+
+/*
+ * PMU format attributes
+ */
+ssize_t hisi_format_sysfs_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+	return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+/*
+ * PMU event attributes
+ */
+ssize_t hisi_event_sysfs_show(struct device *dev,
+			      struct device_attribute *attr, char *page)
+{
+	struct dev_ext_attribute *eattr;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+	return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+/*
+ * sysfs cpumask attribute. For an uncore PMU there is only a single CPU
+ * to show.
+ */
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+	return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+}
+
+static bool hisi_validate_event_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	/* Include a count for this event itself */
+	int counters = 1;
+
+	if (!is_software_event(leader)) {
+		/*
+		 * We must NOT create groups containing mixed PMUs, although
+		 * software events are acceptable
+		 */
+		if (leader->pmu != event->pmu)
+			return false;
+
+		/* Increment counter for the leader */
+		if (leader != event)
+			counters++;
+	}
+
+	list_for_each_entry(sibling, &event->group_leader->sibling_list,
+			    group_entry) {
+		if (is_software_event(sibling))
+			continue;
+		if (sibling->pmu != event->pmu)
+			return false;
+		/* Increment counter for each sibling */
+		counters++;
+	}
+
+	/* The group cannot use more events than there are hardware counters */
+	return counters <= hisi_pmu->num_counters;
+}
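+
+/*
+ * Example: on an L3C PMU with 8 counters, a group of one L3C leader and
+ * 7 L3C siblings is accepted (counters == 8); an 8th sibling would make
+ * counters == 9 and the group is rejected.
+ */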
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
+{
+	return idx >= 0 && idx < hisi_pmu->num_counters;
+}
+
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
+	u32 num_counters = hisi_pmu->num_counters;
+	int idx;
+
+	idx = find_first_zero_bit(used_mask, num_counters);
+	if (idx == num_counters)
+		return -EAGAIN;
+
+	set_bit(idx, used_mask);
+
+	return idx;
+}
+
+static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
+{
+	if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
+		dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+int hisi_uncore_pmu_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hisi_pmu *hisi_pmu;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/*
+	 * We do not support sampling, as the counters are all shared by
+	 * all CPU cores in a CPU die (SCCL). We also do not support
+	 * attaching to a task (per-process mode).
+	 */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	/* counters do not have these bits */
+	if (event->attr.exclude_user	||
+	    event->attr.exclude_kernel	||
+	    event->attr.exclude_host	||
+	    event->attr.exclude_guest	||
+	    event->attr.exclude_hv	||
+	    event->attr.exclude_idle)
+		return -EINVAL;
+
+	/*
+	 * The uncore counters are not specific to any CPU, so we cannot
+	 * support per-task counting.
+	 */
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	/*
+	 * Validate that the events in the group do not exceed the
+	 * available counters in hardware.
+	 */
+	if (!hisi_validate_event_group(event))
+		return -EINVAL;
+
+	hisi_pmu = to_hisi_pmu(event->pmu);
+	if (event->attr.config > hisi_pmu->check_event)
+		return -EINVAL;
+
+	if (hisi_pmu->on_cpu == -1)
+		return -EINVAL;
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= event->attr.config;
+
+	/* Enforce to use the same CPU for all events in this PMU */
+	event->cpu = hisi_pmu->on_cpu;
+
+	return 0;
+}
+
+/*
+ * Set the counter to count the event that we're interested in,
+ * and enable interrupt and counter.
+ */
+static void hisi_uncore_pmu_enable_event(struct perf_event *event)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
+				    HISI_GET_EVENTID(event));
+
+	hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
+	hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
+}
+
+/*
+ * Disable counter and interrupt.
+ */
+static void hisi_uncore_pmu_disable_event(struct perf_event *event)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
+	hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+}
+
+void hisi_uncore_pmu_set_event_period(struct perf_event *event)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * The HiSilicon PMU counters are 32 or 48 bits wide, depending on
+	 * the PMU. We start counting from 2^(counter_bits - 1) to allow for
+	 * extreme interrupt latency: the overflow interrupt can then be
+	 * handled before another 2^(counter_bits - 1) events occur and the
+	 * counter overtakes its previous value.
+	 */
+	u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);
+
+	local64_set(&hwc->prev_count, val);
+	/* Write start value to the hardware event counter */
+	hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
+}
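+
+/*
+ * Example: the 32-bit DDRC counters start at BIT_ULL(31) == 0x80000000,
+ * so 2^31 events elapse before the overflow interrupt fires; the 48-bit
+ * HHA/L3C counters start at BIT_ULL(47) accordingly.
+ */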
+
+void hisi_uncore_pmu_event_update(struct perf_event *event)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+	do {
+		/* Read the count from the counter register */
+		new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
+		prev_raw_count = local64_read(&hwc->prev_count);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+				 new_raw_count) != prev_raw_count);
+	/* Compute the delta */
+	delta = (new_raw_count - prev_raw_count) &
+		HISI_MAX_PERIOD(hisi_pmu->counter_bits);
+	local64_add(delta, &event->count);
+}
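+
+/*
+ * Example: with a 32-bit counter, prev_count == 0xfffffff0 and
+ * new_raw_count == 0x10 wrap across zero, and the mask recovers
+ * delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20, i.e. 32 events.
+ */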
+
+void hisi_uncore_pmu_start(struct perf_event *event, int flags)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+	hwc->state = 0;
+	hisi_uncore_pmu_set_event_period(event);
+
+	if (flags & PERF_EF_RELOAD) {
+		u64 prev_raw_count = local64_read(&hwc->prev_count);
+
+		hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
+	}
+
+	hisi_uncore_pmu_enable_event(event);
+	perf_event_update_userpage(event);
+}
+
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hisi_uncore_pmu_disable_event(event);
+	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+	hwc->state |= PERF_HES_STOPPED;
+
+	if (hwc->state & PERF_HES_UPTODATE)
+		return;
+
+	/* Read hardware counter and update the perf counter statistics */
+	hisi_uncore_pmu_event_update(event);
+	hwc->state |= PERF_HES_UPTODATE;
+}
+
+int hisi_uncore_pmu_add(struct perf_event *event, int flags)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+	/* Get an available counter index for counting */
+	idx = hisi_pmu->ops->get_event_idx(event);
+	if (idx < 0)
+		return idx;
+
+	event->hw.idx = idx;
+	hisi_pmu->pmu_events.hw_events[idx] = event;
+
+	if (flags & PERF_EF_START)
+		hisi_uncore_pmu_start(event, PERF_EF_RELOAD);
+
+	return 0;
+}
+
+void hisi_uncore_pmu_del(struct perf_event *event, int flags)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
+	hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
+	perf_event_update_userpage(event);
+	hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
+}
+
+void hisi_uncore_pmu_read(struct perf_event *event)
+{
+	/* Read hardware counter and update the perf counter statistics */
+	hisi_uncore_pmu_event_update(event);
+}
+
+void hisi_uncore_pmu_enable(struct pmu *pmu)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+	int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask,
+				    hisi_pmu->num_counters);
+
+	if (!enabled)
+		return;
+
+	hisi_pmu->ops->start_counters(hisi_pmu);
+}
+
+void hisi_uncore_pmu_disable(struct pmu *pmu)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+
+	hisi_pmu->ops->stop_counters(hisi_pmu);
+}
+
+/*
+ * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
+ * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
+ * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
+ * in MPIDR[aff1]. If this changes in the future, this will need updating.
+ */
+static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+{
+	u64 mpidr = read_cpuid_mpidr();
+
+	if (mpidr & MPIDR_MT_BITMASK) {
+		if (sccl_id)
+			*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+		if (ccl_id)
+			*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+	} else {
+		if (sccl_id)
+			*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+		if (ccl_id)
+			*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	}
+}
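+
+/*
+ * Example: on a part without multi-threading, a CPU with MPIDR.Aff2 == 3
+ * and MPIDR.Aff1 == 1 reports sccl_id == 3 and ccl_id == 1, and thus
+ * matches a PMU named hisi_sccl3_<module> whose ccl_id is 1 or -1.
+ */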
+
+/*
+ * Check whether the CPU is associated with this uncore PMU
+ */
+static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
+{
+	int sccl_id, ccl_id;
+
+	if (hisi_pmu->ccl_id == -1) {
+		/* If CCL_ID is -1, the PMU only shares the same SCCL */
+		hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
+
+		return sccl_id == hisi_pmu->sccl_id;
+	}
+
+	hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
+
+	return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+}
+
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+						     node);
+
+	if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+		return 0;
+
+	cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
+
+	/* If another CPU is already managing this PMU, simply return. */
+	if (hisi_pmu->on_cpu != -1)
+		return 0;
+
+	/* Use this CPU in cpumask for event counting */
+	hisi_pmu->on_cpu = cpu;
+
+	/* Overflow interrupt also should use the same CPU */
+	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
+
+	return 0;
+}
+
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+						     node);
+	cpumask_t pmu_online_cpus;
+	unsigned int target;
+
+	if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
+		return 0;
+
+	/* Nothing to do if this CPU doesn't own the PMU */
+	if (hisi_pmu->on_cpu != cpu)
+		return 0;
+
+	/* Give up ownership of the PMU */
+	hisi_pmu->on_cpu = -1;
+
+	/* Choose a new CPU to migrate ownership of the PMU to */
+	cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
+		    cpu_online_mask);
+	target = cpumask_any_but(&pmu_online_cpus, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
+	/* Use this CPU for event counting */
+	hisi_pmu->on_cpu = target;
+	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
+
+	return 0;
+}
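
For completeness, a registered PMU instance can also be exercised from
userspace without the perf tool. The following is a minimal sketch using
perf_event_open(2); the instance name "hisi_sccl3_l3c0", the event code
0x02 (rd_hit_cpipe) and CPU 0 are illustrative values that depend on the
running system.

#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Look up the dynamic PMU type id allocated by perf_pmu_register() */
static int read_pmu_type(const char *pmu)
{
	char path[128];
	int type = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	long long count;
	int fd, type;

	type = read_pmu_type("hisi_sccl3_l3c0");	/* example instance */
	if (type < 0)
		return 1;

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x02;	/* rd_hit_cpipe */

	/* Uncore events are counting-only and per-CPU: pid == -1, cpu >= 0 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("rd_hit_cpipe: %lld\n", count);
	close(fd);

	return 0;
}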

+ 102 - 0
drivers/perf/hisilicon/hisi_uncore_pmu.h

@@ -0,0 +1,102 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ *         Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __HISI_UNCORE_PMU_H__
+#define __HISI_UNCORE_PMU_H__
+
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "hisi_pmu: " fmt
+
+#define HISI_MAX_COUNTERS 0x10
+#define to_hisi_pmu(p)	(container_of(p, struct hisi_pmu, pmu))
+
+#define HISI_PMU_ATTR(_name, _func, _config)				\
+	(&((struct dev_ext_attribute[]) {				\
+		{ __ATTR(_name, 0444, _func, NULL), (void *)_config }   \
+	})[0].attr.attr)
+
+#define HISI_PMU_FORMAT_ATTR(_name, _config)		\
+	HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config)
+#define HISI_PMU_EVENT_ATTR(_name, _config)		\
+	HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
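+
+/*
+ * Example: HISI_PMU_EVENT_ATTR(flux_wr, 0x00) creates a read-only sysfs
+ * attribute named "flux_wr" whose show routine, hisi_event_sysfs_show(),
+ * prints "config=0x0"; the perf tool parses this when the event is used
+ * by name.
+ */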
+
+struct hisi_pmu;
+
+struct hisi_uncore_ops {
+	void (*write_evtype)(struct hisi_pmu *, int, u32);
+	int (*get_event_idx)(struct perf_event *);
+	u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *);
+	void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64);
+	void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+	void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+	void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+	void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+	void (*start_counters)(struct hisi_pmu *);
+	void (*stop_counters)(struct hisi_pmu *);
+};
+
+struct hisi_pmu_hwevents {
+	struct perf_event *hw_events[HISI_MAX_COUNTERS];
+	DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+};
+
+/* Generic PMU struct for the different PMU types */
+struct hisi_pmu {
+	struct pmu pmu;
+	const struct hisi_uncore_ops *ops;
+	struct hisi_pmu_hwevents pmu_events;
+	/* associated_cpus: All CPUs associated with the PMU */
+	cpumask_t associated_cpus;
+	/* CPU used for counting */
+	int on_cpu;
+	int irq;
+	struct device *dev;
+	struct hlist_node node;
+	int sccl_id;
+	int ccl_id;
+	void __iomem *base;
+	/* the ID of the PMU module */
+	u32 index_id;
+	int num_counters;
+	int counter_bits;
+	/* check event code range */
+	int check_event;
+};
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
+void hisi_uncore_pmu_read(struct perf_event *event);
+int hisi_uncore_pmu_add(struct perf_event *event, int flags);
+void hisi_uncore_pmu_del(struct perf_event *event, int flags);
+void hisi_uncore_pmu_start(struct perf_event *event, int flags);
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags);
+void hisi_uncore_pmu_set_event_period(struct perf_event *event);
+void hisi_uncore_pmu_event_update(struct perf_event *event);
+int hisi_uncore_pmu_event_init(struct perf_event *event);
+void hisi_uncore_pmu_enable(struct pmu *pmu);
+void hisi_uncore_pmu_disable(struct pmu *pmu);
+ssize_t hisi_event_sysfs_show(struct device *dev,
+			      struct device_attribute *attr, char *buf);
+ssize_t hisi_format_sysfs_show(struct device *dev,
+			       struct device_attribute *attr, char *buf);
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+				struct device_attribute *attr, char *buf);
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
+#endif /* __HISI_UNCORE_PMU_H__ */

+ 3 - 0
include/linux/cpuhotplug.h

@@ -153,6 +153,9 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+	CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+	CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
 	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
 	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
 	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,

+ 1 - 0
include/uapi/linux/perf_event.h

@@ -941,6 +941,7 @@ enum perf_callchain_context {
 #define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
 #define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
 #define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */
 
 #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
 #define PERF_FLAG_FD_OUTPUT		(1UL << 1)

+ 4 - 0
kernel/events/ring_buffer.c

@@ -411,6 +411,7 @@ err:
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
 static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
 {
@@ -480,6 +481,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 	rb_free_aux(rb);
 	ring_buffer_put(rb);
 }
+EXPORT_SYMBOL_GPL(perf_aux_output_end);
 
 /*
  * Skip over a given number of bytes in the AUX buffer, due to, for example,
@@ -505,6 +507,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(perf_aux_output_skip);
 
 void *perf_get_aux(struct perf_output_handle *handle)
 {
@@ -514,6 +517,7 @@ void *perf_get_aux(struct perf_output_handle *handle)
 
 	return handle->rb->aux_priv;
 }
+EXPORT_SYMBOL_GPL(perf_get_aux);
 
 #define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
 

+ 1 - 0
kernel/irq/irqdesc.c

@@ -863,6 +863,7 @@ int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
 
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {