@@ -731,6 +731,138 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 	},
 };
 
+/*
+ * Krait HW events mapping
+ */
+static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
 /*
  * Perf Events' indices
  */
@@ -1212,6 +1344,18 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
+static int krait_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &krait_perf_map,
+				&krait_perf_cache_map, 0xFFFFF);
+}
+
+static int krait_map_event_no_branch(struct perf_event *event)
+{
+	return armpmu_map_event(event, &krait_perf_map_no_branch,
+				&krait_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq = armv7pmu_handle_irq;
@@ -1283,6 +1427,21 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
 	return 0;
 }
+
+static int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Krait";
+	/* Some early versions of Krait don't support PC write events */
+	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
+				  "qcom,no-pc-write"))
+		cpu_pmu->map_event = krait_map_event_no_branch;
+	else
+		cpu_pmu->map_event = krait_map_event;
+	cpu_pmu->num_events = armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1308,4 +1467,9 @@ static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_CPU_V7 */