@@ -107,6 +107,7 @@ struct cci_pmu_hw_events {
 	raw_spinlock_t pmu_lock;
 };
 
+struct cci_pmu;
 /*
  * struct cci_pmu_model:
  * @fixed_hw_cntrs - Number of fixed event counters
@@ -119,6 +120,8 @@ struct cci_pmu_model {
 	u32 num_hw_cntrs;
 	u32 cntr_size;
 	struct event_range event_ranges[CCI_IF_MAX];
+	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
+	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
 };
 
 static struct cci_pmu_model cci_pmu_models[];
@@ -189,7 +192,29 @@ enum cci400_perf_events {
 #define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
 #define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
 
-static int pmu_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
+static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
+				struct cci_pmu_hw_events *hw,
+				unsigned long cci_event)
+{
+	int idx;
+
+	/* cycles event idx is fixed */
+	if (cci_event == CCI_PMU_CYCLES) {
+		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+			return -EAGAIN;
+
+		return CCI_PMU_CYCLE_CNTR_IDX;
+	}
+
+	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+		if (!test_and_set_bit(idx, hw->used_mask))
+			return idx;
+
+	/* No counters available */
+	return -EAGAIN;
+}
+
+static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
 {
 	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
 	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
@@ -198,6 +223,9 @@ static int pmu_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event
 	if (hw_event & ~CCI_PMU_EVENT_MASK)
 		return -ENOENT;
 
+	if (hw_event == CCI_PMU_CYCLES)
+		return hw_event;
+
 	switch (ev_source) {
 	case CCI_PORT_S0:
 	case CCI_PORT_S1:
@@ -289,18 +317,14 @@ static u32 pmu_get_max_counters(void)
 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
 {
 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
-	struct hw_perf_event *hw_event = &event->hw;
-	unsigned long cci_event = hw_event->config_base;
+	unsigned long cci_event = event->hw.config_base;
 	int idx;
 
-	if (cci_event == CCI_PMU_CYCLES) {
-		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
-			return -EAGAIN;
+	if (cci_pmu->model->get_event_idx)
+		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
 
-		return CCI_PMU_CYCLE_CNTR_IDX;
-	}
-
-	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+	/* Generic code to find an unused idx from the mask */
+	for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
 		if (!test_and_set_bit(idx, hw->used_mask))
 			return idx;
 
@@ -310,19 +334,13 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
 
 static int pmu_map_event(struct perf_event *event)
 {
-	int mapping;
-	unsigned long config = event->attr.config;
+	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
 
-	if (event->attr.type < PERF_TYPE_MAX)
+	if (event->attr.type < PERF_TYPE_MAX ||
+	    !cci_pmu->model->validate_hw_event)
 		return -ENOENT;
 
-	if (config == CCI_PMU_CYCLES)
-		mapping = config;
-	else
-		mapping = pmu_validate_hw_event(to_cci_pmu(event->pmu),
-						config);
-
-	return mapping;
+	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
 }
 
 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
@@ -450,7 +468,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
 	 * This should work regardless of whether we have per-counter overflow
 	 * interrupt or a combined overflow interrupt.
 	 */
-	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
 		struct perf_event *event = events->events[idx];
 		struct hw_perf_event *hw_counter;
 
@@ -538,6 +556,16 @@ static void cci_pmu_disable(struct pmu *pmu)
 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
 }
 
+/*
+ * Check if the idx represents a non-programmable counter.
+ * All the fixed event counters are mapped before the programmable
+ * counters.
+ */
+static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
+{
+	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
+}
+
 static void cci_pmu_start(struct perf_event *event, int pmu_flags)
 {
 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
@@ -562,8 +590,8 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags)
 
 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
 
-	/* Configure the event to count, unless you are counting cycles */
-	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+	/* Configure the counter unless you are counting a fixed event */
+	if (!pmu_fixed_hw_idx(cci_pmu, idx))
 		pmu_set_event(cci_pmu, idx, hwc->config_base);
 
 	pmu_event_set_period(event);
@@ -907,6 +935,8 @@ static struct cci_pmu_model cci_pmu_models[] = {
 				CCI_REV_R0_MASTER_PORT_MAX_EV,
 			},
 		},
+		.validate_hw_event = cci400_validate_hw_event,
+		.get_event_idx = cci400_get_event_idx,
 	},
 	[CCI_REV_R1] = {
 		.name = "CCI_400_r1",
@@ -923,6 +953,8 @@ static struct cci_pmu_model cci_pmu_models[] = {
 				CCI_REV_R1_MASTER_PORT_MAX_EV,
 			},
 		},
+		.validate_hw_event = cci400_validate_hw_event,
+		.get_event_idx = cci400_get_event_idx,
 	},
 };
 
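For reference, the sketch below (illustrative only, not part of the patch) shows how another CCI model could plug into the hooks added above. The cci5xx_* names and the field values are hypothetical. A model that supplies only .validate_hw_event falls back to the generic counter allocator in pmu_get_event_idx(), while pmu_map_event() rejects events for any model that leaves .validate_hw_event NULL.

/* Illustrative sketch only -- the cci5xx names and numbers are made up. */
static int cci5xx_validate_hw_event(struct cci_pmu *cci_pmu,
				    unsigned long hw_event)
{
	/* Accept anything encodable in the event mask; no fixed events. */
	if (hw_event & ~CCI_PMU_EVENT_MASK)
		return -ENOENT;

	return hw_event;
}

static struct cci_pmu_model cci5xx_model = {
	.name			= "CCI_5xx",	/* hypothetical model */
	.fixed_hw_cntrs		= 0,		/* no cycle-style fixed counter */
	.num_hw_cntrs		= 8,
	.cntr_size		= SZ_64K,
	/* .event_ranges would be filled in per interface as for CCI-400 */
	.validate_hw_event	= cci5xx_validate_hw_event,
	/* .get_event_idx left NULL: the generic allocator scans used_mask */
};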