@@ -18,6 +18,7 @@ static struct event_constraint constraint_empty =
 			EVENT_CONSTRAINT(0, 0, 0);
 
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -33,10 +34,81 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
+
+static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+	u64 count;
+
+	rdmsrl(event->hw.event_base, count);
+
+	return count;
+}
+
+/*
+ * generic get constraint function for shared match/mask registers.
+ */
+static struct event_constraint *
+uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct intel_uncore_extra_reg *er;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+	unsigned long flags;
+	bool ok = false;
+
+	/*
+	 * reg->alloc can be set due to existing state, so for fake box we
+	 * need to ignore this, otherwise we might fail to allocate proper
+	 * fake state for this extra reg constraint.
+	 */
+	if (reg1->idx == EXTRA_REG_NONE ||
+	    (!uncore_box_is_fake(box) && reg1->alloc))
+		return NULL;
+
+	er = &box->shared_regs[reg1->idx];
+	raw_spin_lock_irqsave(&er->lock, flags);
+	if (!atomic_read(&er->ref) ||
+	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
+		atomic_inc(&er->ref);
+		er->config1 = reg1->config;
+		er->config2 = reg2->config;
+		ok = true;
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	if (ok) {
+		if (!uncore_box_is_fake(box))
+			reg1->alloc = 1;
+		return NULL;
+	}
+
+	return &constraint_empty;
+}
+
+static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct intel_uncore_extra_reg *er;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+	/*
+	 * Only put constraint if extra reg was actually allocated. Also
+	 * takes care of events which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake box we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent box
+	 * state either since it will be thrown out.
+	 */
+	if (uncore_box_is_fake(box) || !reg1->alloc)
+		return;
+
+	er = &box->shared_regs[reg1->idx];
+	atomic_dec(&er->ref);
+	reg1->alloc = 0;
+}
 
 /* Sandy Bridge-EP uncore support */
 static struct intel_uncore_type snbep_uncore_cbox;
@@ -64,18 +136,15 @@ static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 	pci_write_config_dword(pdev, box_ctl, config);
 }
 
-static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
 
-	pci_write_config_dword(pdev, hwc->config_base, hwc->config |
-				SNBEP_PMON_CTL_EN);
+	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 }
 
-static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
@@ -83,8 +152,7 @@ static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 }
 
-static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
@@ -92,14 +160,15 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
 
 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+
 	return count;
 }
 
 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
 {
 	struct pci_dev *pdev = box->pci_dev;
-	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
-				SNBEP_PMON_BOX_CTL_INT);
+
+	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
 }
 
 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
@@ -112,7 +181,6 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 		rdmsrl(msr, config);
 		config |= SNBEP_PMON_BOX_CTL_FRZ;
 		wrmsrl(msr, config);
-		return;
 	}
 }
 
@@ -126,12 +194,10 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 		rdmsrl(msr, config);
 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 		wrmsrl(msr, config);
-		return;
 	}
 }
 
-static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -150,68 +216,15 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
 	wrmsrl(hwc->config_base, hwc->config);
 }
 
-static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	u64 count;
-
-	rdmsrl(hwc->event_base, count);
-	return count;
-}
-
 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
 {
 	unsigned msr = uncore_msr_box_ctl(box);
+
 	if (msr)
 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
 }
 
-static struct event_constraint *
-snbep_uncore_get_constraint(struct intel_uncore_box *box,
-			   struct perf_event *event)
-{
-	struct intel_uncore_extra_reg *er;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-	unsigned long flags;
-	bool ok = false;
-
-	if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
-		return NULL;
-
-	er = &box->shared_regs[reg1->idx];
-	raw_spin_lock_irqsave(&er->lock, flags);
-	if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
-		atomic_inc(&er->ref);
-		er->config1 = reg1->config;
-		ok = true;
-	}
-	raw_spin_unlock_irqrestore(&er->lock, flags);
-
-	if (ok) {
-		if (box->phys_id >= 0)
-			reg1->alloc = 1;
-		return NULL;
-	}
-	return &constraint_empty;
-}
-
-static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
-					struct perf_event *event)
-{
-	struct intel_uncore_extra_reg *er;
-	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-
-	if (box->phys_id < 0 || !reg1->alloc)
-		return;
-
-	er = &box->shared_regs[reg1->idx];
-	atomic_dec(&er->ref);
-	reg1->alloc = 0;
-}
-
-static int snbep_uncore_hw_config(struct intel_uncore_box *box,
-				  struct perf_event *event)
+static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -221,14 +234,16 @@ static int snbep_uncore_hw_config(struct intel_uncore_box *box,
 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 		reg1->config = event->attr.config1 &
 			SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
-	} else if (box->pmu->type == &snbep_uncore_pcu) {
-		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
-		reg1->config = event->attr.config1 &
-			SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
 	} else {
-		return 0;
+		if (box->pmu->type == &snbep_uncore_pcu) {
+			reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+			reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+		} else {
+			return 0;
+		}
 	}
 	reg1->idx = 0;
+
 	return 0;
 }
 
@@ -272,10 +287,19 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = {
 	&format_attr_thresh5.attr,
 	&format_attr_occ_invert.attr,
 	&format_attr_occ_edge.attr,
-	&format_attr_filter_brand0.attr,
-	&format_attr_filter_brand1.attr,
-	&format_attr_filter_brand2.attr,
-	&format_attr_filter_brand3.attr,
+	&format_attr_filter_band0.attr,
+	&format_attr_filter_band1.attr,
+	&format_attr_filter_band2.attr,
+	&format_attr_filter_band3.attr,
+	NULL,
+};
+
+static struct attribute *snbep_uncore_qpi_formats_attr[] = {
+	&format_attr_event_ext.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
 	NULL,
 };
 
@@ -314,15 +338,20 @@ static struct attribute_group snbep_uncore_pcu_format_group = {
 	.attrs = snbep_uncore_pcu_formats_attr,
 };
 
+static struct attribute_group snbep_uncore_qpi_format_group = {
+	.name = "format",
+	.attrs = snbep_uncore_qpi_formats_attr,
+};
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
 	.init_box = snbep_uncore_msr_init_box,
 	.disable_box = snbep_uncore_msr_disable_box,
 	.enable_box = snbep_uncore_msr_enable_box,
 	.disable_event = snbep_uncore_msr_disable_event,
 	.enable_event = snbep_uncore_msr_enable_event,
-	.read_counter = snbep_uncore_msr_read_counter,
-	.get_constraint = snbep_uncore_get_constraint,
-	.put_constraint = snbep_uncore_put_constraint,
+	.read_counter = uncore_msr_read_counter,
+	.get_constraint = uncore_get_constraint,
+	.put_constraint = uncore_put_constraint,
 	.hw_config = snbep_uncore_hw_config,
 };
 
@@ -485,8 +514,13 @@ static struct intel_uncore_type snbep_uncore_qpi = {
 	.num_counters = 4,
 	.num_boxes = 2,
 	.perf_ctr_bits = 48,
+	.perf_ctr = SNBEP_PCI_PMON_CTR0,
+	.event_ctl = SNBEP_PCI_PMON_CTL0,
+	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+	.ops = &snbep_uncore_pci_ops,
 	.event_descs = snbep_uncore_qpi_events,
-	SNBEP_UNCORE_PCI_COMMON_INIT(),
+	.format_group = &snbep_uncore_qpi_format_group,
 };
 
 
@@ -589,188 +623,1208 @@ static void snbep_pci2phy_map_init(void)
 		/* get the Node ID mapping */
 		pci_read_config_dword(ubox_dev, 0x54, &config);
 		/*
-		 * every three bits in the Node ID mapping register maps
-		 * to a particular node.
+		 * every three bits in the Node ID mapping register maps
+		 * to a particular node.
+		 */
+		for (i = 0; i < 8; i++) {
+			if (nodeid == ((config >> (3 * i)) & 0x7)) {
+				pcibus_to_physid[bus] = i;
+				break;
+			}
+		}
+	};
+	return;
+}
+/* end of Sandy Bridge-EP uncore support */
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+	else
+		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	wrmsrl(event->hw.config_base, 0);
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+	if (box->pmu->pmu_idx == 0) {
+		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+	}
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask5.attr,
+	NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+	.name = "format",
+	.attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+	.init_box = snb_uncore_msr_init_box,
+	.disable_event = snb_uncore_msr_disable_event,
+	.enable_event = snb_uncore_msr_enable_event,
+	.read_counter = uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+	.name = "cbox",
+	.num_counters = 2,
+	.num_boxes = 4,
+	.perf_ctr_bits = 44,
+	.fixed_ctr_bits = 48,
+	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
+	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+	.fixed_ctr = SNB_UNC_FIXED_CTR,
+	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+	.single_fixed = 1,
+	.event_mask = SNB_UNC_RAW_EVENT_MASK,
+	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
+	.constraints = snb_uncore_cbox_constraints,
+	.ops = &snb_uncore_msr_ops,
+	.format_group = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+	&snb_uncore_cbox,
+	NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+	else
+		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_cmask8.attr,
+	NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+	.name = "format",
+	.attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
+	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
+	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
+	{ /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+	.disable_box = nhm_uncore_msr_disable_box,
+	.enable_box = nhm_uncore_msr_enable_box,
+	.disable_event = snb_uncore_msr_disable_event,
+	.enable_event = nhm_uncore_msr_enable_event,
+	.read_counter = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+	.name = "",
+	.num_counters = 8,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.fixed_ctr_bits = 48,
+	.event_ctl = NHM_UNC_PERFEVTSEL0,
+	.perf_ctr = NHM_UNC_UNCORE_PMC0,
+	.fixed_ctr = NHM_UNC_FIXED_CTR,
+	.fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
+	.event_mask = NHM_UNC_RAW_EVENT_MASK,
+	.event_descs = nhm_uncore_events,
+	.ops = &nhm_uncore_msr_ops,
+	.format_group = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+	&nhm_uncore,
+	NULL,
+};
+/* end of Nehalem uncore support */
+
+/* Nehalem-EX uncore support */
+#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
+				((1ULL << (n)) - 1)))
+
+DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
+DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
+DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
+DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
+
+static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
+}
+
+static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+	unsigned msr = uncore_msr_box_ctl(box);
+	u64 config;
+
+	if (msr) {
+		rdmsrl(msr, config);
+		config &= ~((1ULL << uncore_num_counters(box)) - 1);
+		/* WBox has a fixed counter */
+		if (uncore_msr_fixed_ctl(box))
+			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
+		wrmsrl(msr, config);
+	}
+}
+
+static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+	unsigned msr = uncore_msr_box_ctl(box);
+	u64 config;
+
+	if (msr) {
+		rdmsrl(msr, config);
+		config |= (1ULL << uncore_num_counters(box)) - 1;
+		/* WBox has a fixed counter */
+		if (uncore_msr_fixed_ctl(box))
+			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
+		wrmsrl(msr, config);
+	}
+}
+
+static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	wrmsrl(event->hw.config_base, 0);
+}
+
+static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
+	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
+		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+	else
+		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
+	.init_box	= nhmex_uncore_msr_init_box,		\
+	.disable_box	= nhmex_uncore_msr_disable_box,		\
+	.enable_box	= nhmex_uncore_msr_enable_box,		\
+	.disable_event	= nhmex_uncore_msr_disable_event,	\
+	.read_counter	= uncore_msr_read_counter
+
+static struct intel_uncore_ops nhmex_uncore_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event = nhmex_uncore_msr_enable_event,
+};
+
+static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_edge.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_ubox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_ubox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_ubox = {
+	.name = "ubox",
+	.num_counters = 1,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
+	.perf_ctr = NHMEX_U_MSR_PMON_CTR,
+	.event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
+	.ops = &nhmex_uncore_ops,
+	.format_group = &nhmex_uncore_ubox_format_group
+};
+
+static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_cbox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_cbox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_cbox = {
+	.name = "cbox",
+	.num_counters = 6,
+	.num_boxes = 8,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
+	.perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
+	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
+	.msr_offset = NHMEX_C_MSR_OFFSET,
+	.pair_ctr_ctl = 1,
+	.ops = &nhmex_uncore_ops,
+	.format_group = &nhmex_uncore_cbox_format_group
+};
+
+static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
+	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
+	{ /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type nhmex_uncore_wbox = {
+	.name = "wbox",
+	.num_counters = 4,
+	.num_boxes = 1,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_W_MSR_PMON_CNT0,
+	.perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
+	.fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
+	.fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
+	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
+	.pair_ctr_ctl = 1,
+	.event_descs = nhmex_uncore_wbox_events,
+	.ops = &nhmex_uncore_ops,
+	.format_group = &nhmex_uncore_cbox_format_group
+};
+
+static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+	int ctr, ev_sel;
+
+	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
+		NHMEX_B_PMON_CTR_SHIFT;
+	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
+		NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
+
+	/* events that do not use the match/mask registers */
+	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
+	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
+		return 0;
+
+	if (box->pmu->pmu_idx == 0)
+		reg1->reg = NHMEX_B0_MSR_MATCH;
+	else
+		reg1->reg = NHMEX_B1_MSR_MATCH;
+	reg1->idx = 0;
+	reg1->config = event->attr.config1;
+	reg2->config = event->attr.config2;
+	return 0;
+}
+
+static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+	if (reg1->idx != EXTRA_REG_NONE) {
+		wrmsrl(reg1->reg, reg1->config);
+		wrmsrl(reg1->reg + 1, reg2->config);
+	}
+	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
+}
+
+/*
+ * The Bbox has 4 counters, but each counter monitors different events.
+ * Use bits 6-7 in the event config to select the counter.
+ */
+static struct event_constraint nhmex_uncore_bbox_constraints[] = {
+	EVENT_CONSTRAINT(0 , 1, 0xc0),
+	EVENT_CONSTRAINT(0x40, 2, 0xc0),
+	EVENT_CONSTRAINT(0x80, 4, 0xc0),
+	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
+	EVENT_CONSTRAINT_END,
+};
+
+static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
+	&format_attr_event5.attr,
+	&format_attr_counter.attr,
+	&format_attr_match.attr,
+	&format_attr_mask.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_bbox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_bbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event = nhmex_bbox_msr_enable_event,
+	.hw_config = nhmex_bbox_hw_config,
+	.get_constraint = uncore_get_constraint,
+	.put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_bbox = {
+	.name = "bbox",
+	.num_counters = 4,
+	.num_boxes = 2,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_B0_MSR_PMON_CTL0,
+	.perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
+	.event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
+	.msr_offset = NHMEX_B_MSR_OFFSET,
+	.pair_ctr_ctl = 1,
+	.num_shared_regs = 1,
+	.constraints = nhmex_uncore_bbox_constraints,
+	.ops = &nhmex_uncore_bbox_ops,
+	.format_group = &nhmex_uncore_bbox_format_group
+};
+
+static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+
+	if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
+		reg1->config = event->attr.config1;
+		reg2->config = event->attr.config2;
+	} else {
+		reg1->config = ~0ULL;
+		reg2->config = ~0ULL;
+	}
+
+	if (box->pmu->pmu_idx == 0)
+		reg1->reg = NHMEX_S0_MSR_MM_CFG;
+	else
+		reg1->reg = NHMEX_S1_MSR_MM_CFG;
+
+	reg1->idx = 0;
+
+	return 0;
+}
+
+static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+	wrmsrl(reg1->reg, 0);
+	if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
+		wrmsrl(reg1->reg + 1, reg1->config);
+		wrmsrl(reg1->reg + 2, reg2->config);
+		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
+	}
+	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+}
+
+static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	&format_attr_edge.attr,
+	&format_attr_inv.attr,
+	&format_attr_thresh8.attr,
+	&format_attr_mm_cfg.attr,
+	&format_attr_match.attr,
+	&format_attr_mask.attr,
+	NULL,
+};
+
+static struct attribute_group nhmex_uncore_sbox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_sbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event = nhmex_sbox_msr_enable_event,
+	.hw_config = nhmex_sbox_hw_config,
+	.get_constraint = uncore_get_constraint,
+	.put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_sbox = {
+	.name = "sbox",
+	.num_counters = 4,
+	.num_boxes = 2,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_S0_MSR_PMON_CTL0,
+	.perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
+	.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
+	.msr_offset = NHMEX_S_MSR_OFFSET,
+	.pair_ctr_ctl = 1,
+	.num_shared_regs = 1,
+	.ops = &nhmex_uncore_sbox_ops,
+	.format_group = &nhmex_uncore_sbox_format_group
+};
+
+enum {
+	EXTRA_REG_NHMEX_M_FILTER,
+	EXTRA_REG_NHMEX_M_DSP,
+	EXTRA_REG_NHMEX_M_ISS,
+	EXTRA_REG_NHMEX_M_MAP,
+	EXTRA_REG_NHMEX_M_MSC_THR,
+	EXTRA_REG_NHMEX_M_PGT,
+	EXTRA_REG_NHMEX_M_PLD,
+	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
+};
+
+static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
+	MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
+	MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
+	MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
+	MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
+	/* event 0xa uses two extra registers */
+	MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
+	MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
+	MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
+	/* events 0xd ~ 0x10 use the same extra register */
+	MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
+	MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
+	MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
+	MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
+	MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
+	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
+	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
+	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
+	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
+	EVENT_EXTRA_END
+};
+
+static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
+{
+	struct intel_uncore_extra_reg *er;
+	unsigned long flags;
+	bool ret = false;
+	u64 mask;
+
+	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+		er = &box->shared_regs[idx];
+		raw_spin_lock_irqsave(&er->lock, flags);
+		if (!atomic_read(&er->ref) || er->config == config) {
+			atomic_inc(&er->ref);
+			er->config = config;
+			ret = true;
+		}
+		raw_spin_unlock_irqrestore(&er->lock, flags);
+
+		return ret;
+	}
+	/*
+	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
+	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
+	 * fields which are shared.
+	 */
+	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+	if (WARN_ON_ONCE(idx >= 4))
+		return false;
+
+	/* mask of the shared fields */
+	mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+
+	raw_spin_lock_irqsave(&er->lock, flags);
+	/* add mask of the non-shared field if it's in use */
+	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
+		mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+
+	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
+		atomic_add(1 << (idx * 8), &er->ref);
+		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+			NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+		er->config &= ~mask;
+		er->config |= (config & mask);
+		ret = true;
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	return ret;
+}
+
+static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
+{
+	struct intel_uncore_extra_reg *er;
+
+	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+		er = &box->shared_regs[idx];
+		atomic_dec(&er->ref);
+		return;
+	}
+
+	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+	atomic_sub(1 << (idx * 8), &er->ref);
+}
+
+u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
+	u64 config = reg1->config;
+
+	/* get the non-shared control bits and shift them */
+	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+	config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+	if (new_idx > orig_idx) {
+		idx = new_idx - orig_idx;
+		config <<= 3 * idx;
+	} else {
+		idx = orig_idx - new_idx;
+		config >>= 3 * idx;
+	}
+
+	/* add the shared control bits back */
+	config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+	if (modify) {
+		/* adjust the main event selector */
+		if (new_idx > orig_idx)
+			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+		else
+			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+		reg1->config = config;
+		reg1->idx = ~0xff | new_idx;
+	}
+	return config;
+}
+
+static struct event_constraint *
+nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+	int i, idx[2], alloc = 0;
+	u64 config1 = reg1->config;
+
+	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
+	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
+again:
+	for (i = 0; i < 2; i++) {
+		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+			idx[i] = 0xff;
+
+		if (idx[i] == 0xff)
+			continue;
+
+		if (!nhmex_mbox_get_shared_reg(box, idx[i],
+				__BITS_VALUE(config1, i, 32)))
+			goto fail;
+		alloc |= (0x1 << i);
+	}
+
+	/* for the match/mask registers */
+	if ((uncore_box_is_fake(box) || !reg2->alloc) &&
+	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
+		goto fail;
+
+	/*
+	 * If it's a fake box -- as per validate_{group,event}() we
+	 * shouldn't touch event state and we can avoid doing so
+	 * since both will only call get_event_constraints() once
+	 * on each event, this avoids the need for reg->alloc.
+	 */
+	if (!uncore_box_is_fake(box)) {
+		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
+			nhmex_mbox_alter_er(event, idx[0], true);
+		reg1->alloc |= alloc;
+		reg2->alloc = 1;
+	}
+	return NULL;
+fail:
+	if (idx[0] != 0xff && !(alloc & 0x1) &&
+	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+		/*
+		 * events 0xd ~ 0x10 are functionally identical, but are
+		 * controlled by different fields in the ZDP_CTL_FVC
+		 * register. If we failed to take one field, try the
+		 * remaining 3 choices.
 		 */
-		for (i = 0; i < 8; i++) {
-			if (nodeid == ((config >> (3 * i)) & 0x7)) {
-				pcibus_to_physid[bus] = i;
-				break;
-			}
+		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
+		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+		idx[0] = (idx[0] + 1) % 4;
+		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
+			config1 = nhmex_mbox_alter_er(event, idx[0], false);
+			goto again;
 		}
-	};
-	return;
-}
-/* end of Sandy Bridge-EP uncore support */
+	}
 
+	if (alloc & 0x1)
+		nhmex_mbox_put_shared_reg(box, idx[0]);
+	if (alloc & 0x2)
+		nhmex_mbox_put_shared_reg(box, idx[1]);
+	return &constraint_empty;
+}
 
-/* Sandy Bridge uncore support */
-static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
 
-	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
-		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
-	else
-		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+	if (uncore_box_is_fake(box))
+		return;
+
+	if (reg1->alloc & 0x1)
+		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
+	if (reg1->alloc & 0x2)
+		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
+	reg1->alloc = 0;
+
+	if (reg2->alloc) {
+		nhmex_mbox_put_shared_reg(box, reg2->idx);
+		reg2->alloc = 0;
+	}
 }
 
-static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
-					struct perf_event *event)
+static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
 {
-	wrmsrl(event->hw.config_base, 0);
+	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+		return er->idx;
+	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
 }
 
-static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
-					struct perf_event *event)
+static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 {
-	u64 count;
-	rdmsrl(event->hw.event_base, count);
-	return count;
+	struct intel_uncore_type *type = box->pmu->type;
+	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+	struct extra_reg *er;
+	unsigned msr;
+	int reg_idx = 0;
+
+	if (WARN_ON_ONCE(reg1->idx != -1))
+		return -EINVAL;
+	/*
+	 * The mbox events may require 2 extra MSRs at the most. But only
+	 * the lower 32 bits in these MSRs are significant, so we can use
+	 * config1 to pass two MSRs' config.
+	 */
+	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
+		if (er->event != (event->hw.config & er->config_mask))
+			continue;
+		if (event->attr.config1 & ~er->valid_mask)
+			return -EINVAL;
+		if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
+		    er->idx == __BITS_VALUE(reg1->idx, 1, 8))
+			continue;
+		if (WARN_ON_ONCE(reg_idx >= 2))
+			return -EINVAL;
+
+		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
+		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
+			return -EINVAL;
+
+		/* always use the 32~63 bits to pass the PLD config */
+		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
+			reg_idx = 1;
+
+		reg1->idx &= ~(0xff << (reg_idx * 8));
+		reg1->reg &= ~(0xffff << (reg_idx * 16));
+		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
+		reg1->reg |= msr << (reg_idx * 16);
+		reg1->config = event->attr.config1;
+		reg_idx++;
+	}
+	/* use config2 to pass the filter config */
+	reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+	if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+		reg2->config = event->attr.config2;
+	else
+		reg2->config = ~0ULL;
+	if (box->pmu->pmu_idx == 0)
+		reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+	else
+		reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+
+	return 0;
 }
 
-static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
 {
-	if (box->pmu->pmu_idx == 0) {
-		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
-			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
-	}
+	struct intel_uncore_extra_reg *er;
+	unsigned long flags;
+	u64 config;
+
+	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+		return box->shared_regs[idx].config;
+
+	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+	raw_spin_lock_irqsave(&er->lock, flags);
+	config = er->config;
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+	return config;
 }
 
-static struct attribute *snb_uncore_formats_attr[] = {
-	&format_attr_event.attr,
-	&format_attr_umask.attr,
-	&format_attr_edge.attr,
-	&format_attr_inv.attr,
-	&format_attr_cmask5.attr,
+static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+	int idx;
+
+	idx = __BITS_VALUE(reg1->idx, 0, 8);
+	if (idx != 0xff)
+		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
+			nhmex_mbox_shared_reg_config(box, idx));
+	idx = __BITS_VALUE(reg1->idx, 1, 8);
+	if (idx != 0xff)
+		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
+			nhmex_mbox_shared_reg_config(box, idx));
+
+	wrmsrl(reg2->reg, 0);
+	if (reg2->config != ~0ULL) {
+		wrmsrl(reg2->reg + 1,
+			reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+		wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+			(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+		wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+	}
+
+	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
+
+static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
+	&format_attr_count_mode.attr,
+	&format_attr_storage_mode.attr,
+	&format_attr_wrap_mode.attr,
+	&format_attr_flag_mode.attr,
+	&format_attr_inc_sel.attr,
+	&format_attr_set_flag_sel.attr,
+	&format_attr_filter_cfg.attr,
+	&format_attr_filter_match.attr,
+	&format_attr_filter_mask.attr,
+	&format_attr_dsp.attr,
+	&format_attr_thr.attr,
+	&format_attr_fvc.attr,
+	&format_attr_pgt.attr,
+	&format_attr_map.attr,
+	&format_attr_iss.attr,
+	&format_attr_pld.attr,
 	NULL,
 };
 
-static struct attribute_group snb_uncore_format_group = {
-	.name = "format",
-	.attrs = snb_uncore_formats_attr,
+static struct attribute_group nhmex_uncore_mbox_format_group = {
+	.name = "format",
+	.attrs = nhmex_uncore_mbox_formats_attr,
 };
 
-static struct intel_uncore_ops snb_uncore_msr_ops = {
-	.init_box = snb_uncore_msr_init_box,
-	.disable_event = snb_uncore_msr_disable_event,
-	.enable_event = snb_uncore_msr_enable_event,
-	.read_counter = snb_uncore_msr_read_counter,
+static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
+	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
+	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
+	{ /* end: all zeroes */ },
 };
 
-static struct event_constraint snb_uncore_cbox_constraints[] = {
-	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
-	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
-	EVENT_CONSTRAINT_END
+static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event = nhmex_mbox_msr_enable_event,
+	.hw_config = nhmex_mbox_hw_config,
+	.get_constraint = nhmex_mbox_get_constraint,
+	.put_constraint = nhmex_mbox_put_constraint,
 };
 
-static struct intel_uncore_type snb_uncore_cbox = {
-	.name = "cbox",
-	.num_counters = 2,
-	.num_boxes = 4,
-	.perf_ctr_bits = 44,
-	.fixed_ctr_bits = 48,
-	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
-	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
-	.fixed_ctr = SNB_UNC_FIXED_CTR,
-	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
-	.single_fixed = 1,
-	.event_mask = SNB_UNC_RAW_EVENT_MASK,
-	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
-	.constraints = snb_uncore_cbox_constraints,
-	.ops = &snb_uncore_msr_ops,
-	.format_group = &snb_uncore_format_group,
+static struct intel_uncore_type nhmex_uncore_mbox = {
+	.name = "mbox",
+	.num_counters = 6,
+	.num_boxes = 2,
+	.perf_ctr_bits = 48,
+	.event_ctl = NHMEX_M0_MSR_PMU_CTL0,
+	.perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
+	.event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
+	.box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
+	.msr_offset = NHMEX_M_MSR_OFFSET,
+	.pair_ctr_ctl = 1,
+	.num_shared_regs = 8,
+	.event_descs = nhmex_uncore_mbox_events,
+	.ops = &nhmex_uncore_mbox_ops,
+	.format_group = &nhmex_uncore_mbox_format_group,
 };
 
-static struct intel_uncore_type *snb_msr_uncores[] = {
-	&snb_uncore_cbox,
-	NULL,
-};
-/* end of Sandy Bridge uncore support */
+void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	int port;
 
-/* Nehalem uncore support */
-static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+	/* adjust the main event selector */
+	if (reg1->idx % 2) {
+		reg1->idx--;
+		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+	} else {
+		reg1->idx++;
+		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+	}
+
+	/* adjust address or config of extra register */
+	port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
+	switch (reg1->idx % 6) {
+	case 0:
+		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
+		break;
+	case 1:
+		reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
+		break;
+	case 2:
+		/* the 8~15 bits to the 0~7 bits */
+		reg1->config >>= 8;
+		break;
+	case 3:
+		/* the 0~7 bits to the 8~15 bits */
+		reg1->config <<= 8;
+		break;
+	case 4:
+		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
+		break;
+	case 5:
+		reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
+		break;
+	};
+}
+
+/*
+ * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7.
+ * An event set consists of 6 events; the 3rd and 4th events in
+ * an event set use the same extra register, so an event set uses
+ * 5 extra registers.
+ */
+static struct event_constraint *
+nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
 {
-	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+	struct intel_uncore_extra_reg *er;
+	unsigned long flags;
+	int idx, er_idx;
+	u64 config1;
+	bool ok = false;
+
+	if (!uncore_box_is_fake(box) && reg1->alloc)
+		return NULL;
+
+	idx = reg1->idx % 6;
+	config1 = reg1->config;
+again:
+	er_idx = idx;
+	/* the 3rd and 4th events use the same extra register */
+	if (er_idx > 2)
+		er_idx--;
+	er_idx += (reg1->idx / 6) * 5;
+
+	er = &box->shared_regs[er_idx];
+	raw_spin_lock_irqsave(&er->lock, flags);
+	if (idx < 2) {
+		if (!atomic_read(&er->ref) || er->config == reg1->config) {
+			atomic_inc(&er->ref);
+			er->config = reg1->config;
+			ok = true;
+		}
+	} else if (idx == 2 || idx == 3) {
+		/*
+		 * these two events use different fields in an extra register,
+		 * the 0~7 bits and the 8~15 bits respectively.
+		 */
+		u64 mask = 0xff << ((idx - 2) * 8);
+		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
+		    !((er->config ^ config1) & mask)) {
+			atomic_add(1 << ((idx - 2) * 8), &er->ref);
+			er->config &= ~mask;
+			er->config |= config1 & mask;
+			ok = true;
+		}
+	} else {
+		if (!atomic_read(&er->ref) ||
+		    (er->config == (hwc->config >> 32) &&
+		     er->config1 == reg1->config &&
+		     er->config2 == reg2->config)) {
+			atomic_inc(&er->ref);
+			er->config = (hwc->config >> 32);
+			er->config1 = reg1->config;
+			er->config2 = reg2->config;
+			ok = true;
+		}
+	}
+	raw_spin_unlock_irqrestore(&er->lock, flags);
+
+	if (!ok) {
+		/*
+		 * The Rbox events are always in pairs. The paired
+		 * events are functionally identical, but use different
+		 * extra registers. If we failed to take an extra
+		 * register, try the alternative.
+		 */
+ if (idx % 2)
|
|
|
+ idx--;
|
|
|
+ else
|
|
|
+ idx++;
|
|
|
+ if (idx != reg1->idx % 6) {
|
|
|
+ if (idx == 2)
|
|
|
+ config1 >>= 8;
|
|
|
+ else if (idx == 3)
|
|
|
+ config1 <<= 8;
|
|
|
+ goto again;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (!uncore_box_is_fake(box)) {
|
|
|
+ if (idx != reg1->idx % 6)
|
|
|
+ nhmex_rbox_alter_er(box, event);
|
|
|
+ reg1->alloc = 1;
|
|
|
+ }
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+ return &constraint_empty;
|
|
|
}
|
|
|
|
|
|
-static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
|
|
|
+static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
|
|
|
{
|
|
|
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
|
|
|
- NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
|
|
|
+ struct intel_uncore_extra_reg *er;
|
|
|
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
|
|
|
+ int idx, er_idx;
|
|
|
+
|
|
|
+ if (uncore_box_is_fake(box) || !reg1->alloc)
|
|
|
+ return;
|
|
|
+
|
|
|
+ idx = reg1->idx % 6;
|
|
|
+ er_idx = idx;
|
|
|
+ if (er_idx > 2)
|
|
|
+ er_idx--;
|
|
|
+ er_idx += (reg1->idx / 6) * 5;
|
|
|
+
|
|
|
+ er = &box->shared_regs[er_idx];
|
|
|
+ if (idx == 2 || idx == 3)
|
|
|
+ atomic_sub(1 << ((idx - 2) * 8), &er->ref);
|
|
|
+ else
|
|
|
+ atomic_dec(&er->ref);
|
|
|
+
|
|
|
+ reg1->alloc = 0;
|
|
|
}
|
|
|
|
|
|
-static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
|
|
|
- struct perf_event *event)
|
|
|
+static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
|
|
|
{
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
|
|
|
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
|
|
|
+ int port, idx;
|
|
|
|
|
|
- if (hwc->idx < UNCORE_PMC_IDX_FIXED)
|
|
|
- wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
|
|
|
- else
|
|
|
- wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
|
|
|
+ idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
|
|
|
+ NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
|
|
|
+ if (idx >= 0x18)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ reg1->idx = idx;
|
|
|
+ reg1->config = event->attr.config1;
|
|
|
+
|
|
|
+ port = idx / 6 + box->pmu->pmu_idx * 4;
|
|
|
+ idx %= 6;
|
|
|
+ switch (idx) {
|
|
|
+ case 0:
|
|
|
+ reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
|
|
|
+ break;
|
|
|
+ case 1:
|
|
|
+ reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
|
|
|
+ break;
|
|
|
+ case 2:
|
|
|
+ case 3:
|
|
|
+ reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
|
|
|
+ break;
|
|
|
+ case 4:
|
|
|
+ case 5:
|
|
|
+ if (idx == 4)
|
|
|
+ reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
|
|
|
+ else
|
|
|
+ reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
|
|
|
+ reg2->config = event->attr.config2;
|
|
|
+ hwc->config |= event->attr.config & (~0ULL << 32);
|
|
|
+ break;
|
|
|
+ };
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static struct attribute *nhm_uncore_formats_attr[] = {
|
|
|
- &format_attr_event.attr,
|
|
|
- &format_attr_umask.attr,
|
|
|
- &format_attr_edge.attr,
|
|
|
- &format_attr_inv.attr,
|
|
|
- &format_attr_cmask8.attr,
|
|
|
+static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
|
|
|
+{
|
|
|
+ struct intel_uncore_extra_reg *er;
|
|
|
+ unsigned long flags;
|
|
|
+ u64 config;
|
|
|
+
|
|
|
+ er = &box->shared_regs[idx];
|
|
|
+
|
|
|
+ raw_spin_lock_irqsave(&er->lock, flags);
|
|
|
+ config = er->config;
|
|
|
+ raw_spin_unlock_irqrestore(&er->lock, flags);
|
|
|
+
|
|
|
+ return config;
|
|
|
+}
|
|
|
+
|
|
|
+static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
|
|
|
+{
|
|
|
+ struct hw_perf_event *hwc = &event->hw;
|
|
|
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
|
|
|
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
|
|
|
+ int idx, er_idx;
|
|
|
+
|
|
|
+ idx = reg1->idx % 6;
|
|
|
+ er_idx = idx;
|
|
|
+ if (er_idx > 2)
|
|
|
+ er_idx--;
|
|
|
+ er_idx += (reg1->idx / 6) * 5;
|
|
|
+
|
|
|
+ switch (idx) {
|
|
|
+ case 0:
|
|
|
+ case 1:
|
|
|
+ wrmsrl(reg1->reg, reg1->config);
|
|
|
+ break;
|
|
|
+ case 2:
|
|
|
+ case 3:
|
|
|
+ wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
|
|
|
+ break;
|
|
|
+ case 4:
|
|
|
+ case 5:
|
|
|
+ wrmsrl(reg1->reg, reg1->config);
|
|
|
+ wrmsrl(reg1->reg + 1, hwc->config >> 32);
|
|
|
+ wrmsrl(reg1->reg + 2, reg2->config);
|
|
|
+ break;
|
|
|
+ };
|
|
|
+
|
|
|
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
|
|
|
+ (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
|
|
|
+}
|
|
|
+
|
|
|
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
|
|
|
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
|
|
|
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
|
|
|
+DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
|
|
|
+DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
|
|
|
+
|
|
|
+static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
|
|
|
+ &format_attr_event5.attr,
|
|
|
+ &format_attr_xbr_mm_cfg.attr,
|
|
|
+ &format_attr_xbr_match.attr,
|
|
|
+ &format_attr_xbr_mask.attr,
|
|
|
+ &format_attr_qlx_cfg.attr,
|
|
|
+ &format_attr_iperf_cfg.attr,
|
|
|
NULL,
|
|
|
};
|
|
|
|
|
|
-static struct attribute_group nhm_uncore_format_group = {
|
|
|
+static struct attribute_group nhmex_uncore_rbox_format_group = {
|
|
|
.name = "format",
|
|
|
- .attrs = nhm_uncore_formats_attr,
|
|
|
+ .attrs = nhmex_uncore_rbox_formats_attr,
|
|
|
};

-static struct uncore_event_desc nhm_uncore_events[] = {
-	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
-	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
-	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
-	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
+static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
+	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,     "event=0x0,iperf_cfg=0x80000000"),
+	INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,     "event=0x6,iperf_cfg=0x80000000"),
+	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,     "event=0x0,iperf_cfg=0x40000000"),
+	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,     "event=0x6,iperf_cfg=0x40000000"),
+	INTEL_UNCORE_EVENT_DESC(qpi0_data_response, "event=0x0,iperf_cfg=0xc4"),
+	INTEL_UNCORE_EVENT_DESC(qpi1_data_response, "event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};

-static struct intel_uncore_ops nhm_uncore_msr_ops = {
-	.disable_box	= nhm_uncore_msr_disable_box,
-	.enable_box	= nhm_uncore_msr_enable_box,
-	.disable_event	= snb_uncore_msr_disable_event,
-	.enable_event	= nhm_uncore_msr_enable_event,
-	.read_counter	= snb_uncore_msr_read_counter,
+static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
+	NHMEX_UNCORE_OPS_COMMON_INIT(),
+	.enable_event		= nhmex_rbox_msr_enable_event,
+	.hw_config		= nhmex_rbox_hw_config,
+	.get_constraint		= nhmex_rbox_get_constraint,
+	.put_constraint		= nhmex_rbox_put_constraint,
};
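
+/*
+ * Each Nehalem-EX socket carries two R-Boxes.  pair_ctr_ctl notes that
+ * the control and counter MSRs are interleaved in ctl/ctr pairs rather
+ * than laid out as two flat ranges.
+ */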
-static struct intel_uncore_type nhm_uncore = {
-	.name		= "",
-	.num_counters	= 8,
-	.num_boxes	= 1,
-	.perf_ctr_bits	= 48,
-	.fixed_ctr_bits	= 48,
-	.event_ctl	= NHM_UNC_PERFEVTSEL0,
-	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
-	.fixed_ctr	= NHM_UNC_FIXED_CTR,
-	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
-	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
-	.event_descs	= nhm_uncore_events,
-	.ops		= &nhm_uncore_msr_ops,
-	.format_group	= &nhm_uncore_format_group,
+static struct intel_uncore_type nhmex_uncore_rbox = {
+	.name			= "rbox",
+	.num_counters		= 8,
+	.num_boxes		= 2,
+	.perf_ctr_bits		= 48,
+	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
+	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
+	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
+	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
+	.msr_offset		= NHMEX_R_MSR_OFFSET,
+	.pair_ctr_ctl		= 1,
+	.num_shared_regs	= 20,
+	.event_descs		= nhmex_uncore_rbox_events,
+	.ops			= &nhmex_uncore_rbox_ops,
+	.format_group		= &nhmex_uncore_rbox_format_group,
};
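
+/* All Nehalem-EX uncore boxes are MSR based; see uncore_cpu_init(). */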
-static struct intel_uncore_type *nhm_msr_uncores[] = {
-	&nhm_uncore,
+static struct intel_uncore_type *nhmex_msr_uncores[] = {
+	&nhmex_uncore_ubox,
+	&nhmex_uncore_cbox,
+	&nhmex_uncore_bbox,
+	&nhmex_uncore_sbox,
+	&nhmex_uncore_mbox,
+	&nhmex_uncore_rbox,
+	&nhmex_uncore_wbox,
	NULL,
};
-/* end of Nehalem uncore support */
+/* end of Nehalem-EX uncore support */

-static void uncore_assign_hw_event(struct intel_uncore_box *box,
-				   struct perf_event *event, int idx)
+static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

@@ -787,8 +1841,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

-static void uncore_perf_event_update(struct intel_uncore_box *box,
-				     struct perf_event *event)
+static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;
@@ -858,14 +1911,12 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
	box->hrtimer.function = uncore_pmu_hrtimer;
}

-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
-					  int cpu)
+struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_box *box;
	int i, size;

-	size = sizeof(*box) + type->num_shared_regs *
-		sizeof(struct intel_uncore_extra_reg);
+	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);

	box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
	if (!box)
@@ -915,12 +1966,11 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
	 * perf core schedules event on the basis of cpu, uncore events are
	 * collected by one of the cpus inside a physical package.
	 */
-	return uncore_pmu_to_box(uncore_event_to_pmu(event),
-				 smp_processor_id());
+	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}

-static int uncore_collect_events(struct intel_uncore_box *box,
-				 struct perf_event *leader, bool dogrp)
+static int
+uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;
@@ -952,8 +2002,7 @@ static int uncore_collect_events(struct intel_uncore_box *box,
}

static struct event_constraint *
-uncore_get_event_constraint(struct intel_uncore_box *box,
-			    struct perf_event *event)
+uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;
@@ -977,15 +2026,13 @@ uncore_get_event_constraint(struct intel_uncore_box *box,
	return &type->unconstrainted;
}

-static void uncore_put_event_constraint(struct intel_uncore_box *box,
-					struct perf_event *event)
+static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

-static int uncore_assign_events(struct intel_uncore_box *box,
-				int assign[], int n)
+static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
@@ -1407,8 +2454,7 @@ static bool pcidrv_registered;
/*
 * add a pci uncore device
 */
-static int __devinit uncore_pci_add(struct intel_uncore_type *type,
-				    struct pci_dev *pdev)
+static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
@@ -1485,6 +2531,7 @@ static int __devinit uncore_pci_probe(struct pci_dev *pdev,
	struct intel_uncore_type *type;

	type = (struct intel_uncore_type *)id->driver_data;
+
	return uncore_pci_add(type, pdev);
}

@@ -1612,8 +2659,8 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
	return 0;
}

-static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
-					    int old_cpu, int new_cpu)
+static void __cpuinit
+uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
@@ -1694,8 +2741,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
	uncore_change_context(pci_uncores, -1, cpu);
}

-static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
-					 unsigned long action, void *hcpu)
+static int __cpuinit
+uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

@@ -1732,12 +2779,12 @@ static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
}

static struct notifier_block uncore_cpu_nb __cpuinitdata = {
-	.notifier_call = uncore_cpu_notifier,
+	.notifier_call	= uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
-	.priority = CPU_PRI_PERF + 1,
+	.priority	= CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
@@ -1767,6 +2814,9 @@ static int __init uncore_cpu_init(void)
		snbep_uncore_cbox.num_boxes = max_cores;
		msr_uncores = snbep_msr_uncores;
		break;
+	case 46: /* Nehalem-EX */
+		msr_uncores = nhmex_msr_uncores;
+		break;
	default:
		return 0;
	}