|
|
@@ -164,11 +164,11 @@ static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
|
|
|
for (bank = 0, shift = 0; bank < max_banks; bank++) {
|
|
|
for (cntr = 0; cntr < max_cntrs; cntr++) {
|
|
|
shift = bank + (bank*3) + cntr;
|
|
|
- if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) {
|
|
|
+ if (perf_iommu->cntr_assign_mask & BIT_ULL(shift)) {
|
|
|
continue;
|
|
|
} else {
|
|
|
- perf_iommu->cntr_assign_mask |= (1ULL<<shift);
|
|
|
- retval = ((u16)((u16)bank<<8) | (u8)(cntr));
|
|
|
+ perf_iommu->cntr_assign_mask |= BIT_ULL(shift);
|
|
|
+ retval = ((bank & 0xFF) << 8) | (cntr & 0xFF);
|
|
|
goto out;
|
|
|
}
|
|
|
}
|
|
|
@@ -265,23 +265,23 @@ static void perf_iommu_enable_event(struct perf_event *ev)
|
|
|
_GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
IOMMU_PC_COUNTER_SRC_REG, &reg, true);
|
|
|
|
|
|
- reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32);
|
|
|
+ reg = devid | (_GET_DEVID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
- reg |= (1UL << 31);
|
|
|
+ reg |= BIT(31);
|
|
|
amd_iommu_pc_get_set_reg_val(devid,
|
|
|
_GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
IOMMU_PC_DEVID_MATCH_REG, &reg, true);
|
|
|
|
|
|
- reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
|
|
|
+ reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
- reg |= (1UL << 31);
|
|
|
+ reg |= BIT(31);
|
|
|
amd_iommu_pc_get_set_reg_val(devid,
|
|
|
_GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
IOMMU_PC_PASID_MATCH_REG, &reg, true);
|
|
|
|
|
|
- reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
|
|
|
+ reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
|
|
|
if (reg)
|
|
|
- reg |= (1UL << 31);
|
|
|
+ reg |= BIT(31);
|
|
|
amd_iommu_pc_get_set_reg_val(devid,
|
|
|
_GET_BANK(ev), _GET_CNTR(ev) ,
|
|
|
IOMMU_PC_DOMID_MATCH_REG, &reg, true);
|