|
@@ -248,49 +248,45 @@ static int perf_iommu_event_init(struct perf_event *event)
|
|
|
|
|
|
static void perf_iommu_enable_event(struct perf_event *ev)
|
|
|
{
|
|
|
+ struct amd_iommu *iommu = get_amd_iommu(0);
|
|
|
u8 csource = _GET_CSOURCE(ev);
|
|
|
u16 devid = _GET_DEVID(ev);
|
|
|
+ u8 bank = _GET_BANK(ev);
|
|
|
+ u8 cntr = _GET_CNTR(ev);
|
|
|
u64 reg = 0ULL;
|
|
|
|
|
|
reg = csource;
|
|
|
- amd_iommu_pc_get_set_reg_val(devid,
|
|
|
- _GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
-					     IOMMU_PC_COUNTER_SRC_REG, &reg, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, ®);
|
|
|
|
|
|
reg = devid | (_GET_DEVID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
reg |= BIT(31);
|
|
|
- amd_iommu_pc_get_set_reg_val(devid,
|
|
|
- _GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
-				     IOMMU_PC_DEVID_MATCH_REG, &reg, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, ®);
|
|
|
|
|
|
reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
reg |= BIT(31);
|
|
|
- amd_iommu_pc_get_set_reg_val(devid,
|
|
|
- _GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
-				     IOMMU_PC_PASID_MATCH_REG, &reg, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, ®);
|
|
|
|
|
|
reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
reg |= BIT(31);
|
|
|
- amd_iommu_pc_get_set_reg_val(devid,
|
|
|
- _GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
-				     IOMMU_PC_DOMID_MATCH_REG, &reg, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, ®);
|
|
|
}
|
|
|
|
|
|
static void perf_iommu_disable_event(struct perf_event *event)
|
|
|
{
|
|
|
+ struct amd_iommu *iommu = get_amd_iommu(0);
|
|
|
u64 reg = 0ULL;
|
|
|
|
|
|
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
|
|
|
- _GET_BANK(event), _GET_CNTR(event),
|
|
|
-				     IOMMU_PC_COUNTER_SRC_REG, &reg, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
|
|
|
+			     IOMMU_PC_COUNTER_SRC_REG, &reg);
|
|
|
}
|
|
|
|
|
|
static void perf_iommu_start(struct perf_event *event, int flags)
|
|
|
{
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
+ struct amd_iommu *iommu = get_amd_iommu(0);
|
|
|
|
|
|
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
|
|
|
return;
|
|
@@ -300,9 +296,8 @@ static void perf_iommu_start(struct perf_event *event, int flags)
|
|
|
|
|
|
if (flags & PERF_EF_RELOAD) {
|
|
|
u64 prev_raw_count = local64_read(&hwc->prev_count);
|
|
|
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
|
|
|
- _GET_BANK(event), _GET_CNTR(event),
|
|
|
- IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
|
|
|
+ amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
|
|
|
+ IOMMU_PC_COUNTER_REG, &prev_raw_count);
|
|
|
}
|
|
|
|
|
|
perf_iommu_enable_event(event);
|
|
@@ -314,10 +309,11 @@ static void perf_iommu_read(struct perf_event *event)
|
|
|
{
|
|
|
u64 count, prev, delta;
|
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
|
+ struct amd_iommu *iommu = get_amd_iommu(0);
|
|
|
|
|
|
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
|
|
|
- _GET_BANK(event), _GET_CNTR(event),
|
|
|
- IOMMU_PC_COUNTER_REG, &count, false);
|
|
|
+ if (amd_iommu_pc_get_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
|
|
|
+ IOMMU_PC_COUNTER_REG, &count))
|
|
|
+ return;
|
|
|
|
|
|
/* IOMMU pc counter register is only 48 bits */
|
|
|
count &= GENMASK_ULL(47, 0);
|