@@ -453,6 +453,37 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 }
 
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
+		 || vcpu_mode_priv(vcpu));
+}
+
 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
@@ -461,6 +492,9 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
 	if (p->is_write) {
 		/* Only update writeable bits of PMCR */
 		val = vcpu_sys_reg(vcpu, PMCR_EL0);
@@ -484,6 +518,9 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_access_event_counter_el0_disabled(vcpu))
+		return false;
+
 	if (p->is_write)
 		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
 	else
@@ -504,6 +541,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	BUG_ON(p->is_write);
 
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
 	if (!(p->Op2 & 1))
 		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
 	else
@@ -538,16 +578,25 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 	if (r->CRn == 9 && r->CRm == 13) {
 		if (r->Op2 == 2) {
 			/* PMXEVCNTR_EL0 */
+			if (pmu_access_event_counter_el0_disabled(vcpu))
+				return false;
+
 			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
 			      & ARMV8_PMU_COUNTER_MASK;
 		} else if (r->Op2 == 0) {
 			/* PMCCNTR_EL0 */
+			if (pmu_access_cycle_counter_el0_disabled(vcpu))
+				return false;
+
 			idx = ARMV8_PMU_CYCLE_IDX;
 		} else {
 			BUG();
 		}
 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 		/* PMEVCNTRn_EL0 */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 	} else {
 		BUG();
@@ -556,10 +605,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 	if (!pmu_counter_idx_valid(vcpu, idx))
 		return false;
 
-	if (p->is_write)
+	if (p->is_write) {
+		if (pmu_access_el0_disabled(vcpu))
+			return false;
+
 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
-	else
+	} else {
 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+	}
 
 	return true;
 }
@@ -572,6 +625,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 		/* PMXEVTYPER_EL0 */
 		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
@@ -608,6 +664,9 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
 	mask = kvm_pmu_valid_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
@@ -635,6 +694,9 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (!vcpu_mode_priv(vcpu))
+		return false;
+
 	if (p->is_write) {
 		u64 val = p->regval & mask;
 
@@ -659,6 +721,9 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_access_el0_disabled(vcpu))
+		return false;
+
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
@@ -681,6 +746,9 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (!kvm_arm_pmu_v3_ready(vcpu))
 		return trap_raz_wi(vcpu, p, r);
 
+	if (pmu_write_swinc_el0_disabled(vcpu))
+		return false;
+
 	if (p->is_write) {
 		mask = kvm_pmu_valid_counter_mask(vcpu);
 		kvm_pmu_software_increment(vcpu, p->regval & mask);
@@ -690,6 +758,26 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return false;
 }
 
+static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			     const struct sys_reg_desc *r)
+{
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (p->is_write) {
+		if (!vcpu_mode_priv(vcpu))
+			return false;
+
+		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
+						    & ARMV8_PMU_USERENR_MASK;
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+			    & ARMV8_PMU_USERENR_MASK;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
@@ -919,9 +1007,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* PMXEVCNTR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
 	  access_pmu_evcntr },
-	/* PMUSERENR_EL0 */
+	/* PMUSERENR_EL0
+	 * This register resets as unknown in 64bit mode while it resets as zero
+	 * in 32bit mode. Here we choose to reset it as zero for consistency.
+	 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
-	  trap_raz_wi },
+	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
 	/* PMOVSSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
 	  access_pmovs, reset_unknown, PMOVSSET_EL0 },
@@ -1246,7 +1337,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
-	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
|