|
@@ -35,6 +35,7 @@
|
|
|
#include <asm/kvm_coproc.h>
|
|
|
#include <asm/kvm_emulate.h>
|
|
|
#include <asm/kvm_host.h>
|
|
|
+#include <asm/kvm_hyp.h>
|
|
|
#include <asm/kvm_mmu.h>
|
|
|
#include <asm/perf_event.h>
|
|
|
#include <asm/sysreg.h>
|
|
@@ -76,6 +77,93 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
+u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
|
|
|
+{
|
|
|
+ if (!vcpu->arch.sysregs_loaded_on_cpu)
|
|
|
+ goto immediate_read;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * System registers listed in the switch are not saved on every
|
|
|
+ * exit from the guest but are only saved on vcpu_put.
|
|
|
+ *
|
|
|
+ * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
|
|
|
+ * should never be listed below, because the guest cannot modify its
|
|
|
+ * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
|
|
|
+ * thread when emulating cross-VCPU communication.
|
|
|
+ */
|
|
|
+ switch (reg) {
|
|
|
+ case CSSELR_EL1: return read_sysreg_s(SYS_CSSELR_EL1);
|
|
|
+ case SCTLR_EL1: return read_sysreg_s(sctlr_EL12);
|
|
|
+ case ACTLR_EL1: return read_sysreg_s(SYS_ACTLR_EL1);
|
|
|
+ case CPACR_EL1: return read_sysreg_s(cpacr_EL12);
|
|
|
+ case TTBR0_EL1: return read_sysreg_s(ttbr0_EL12);
|
|
|
+ case TTBR1_EL1: return read_sysreg_s(ttbr1_EL12);
|
|
|
+ case TCR_EL1: return read_sysreg_s(tcr_EL12);
|
|
|
+ case ESR_EL1: return read_sysreg_s(esr_EL12);
|
|
|
+ case AFSR0_EL1: return read_sysreg_s(afsr0_EL12);
|
|
|
+ case AFSR1_EL1: return read_sysreg_s(afsr1_EL12);
|
|
|
+ case FAR_EL1: return read_sysreg_s(far_EL12);
|
|
|
+ case MAIR_EL1: return read_sysreg_s(mair_EL12);
|
|
|
+ case VBAR_EL1: return read_sysreg_s(vbar_EL12);
|
|
|
+ case CONTEXTIDR_EL1: return read_sysreg_s(contextidr_EL12);
|
|
|
+ case TPIDR_EL0: return read_sysreg_s(SYS_TPIDR_EL0);
|
|
|
+ case TPIDRRO_EL0: return read_sysreg_s(SYS_TPIDRRO_EL0);
|
|
|
+ case TPIDR_EL1: return read_sysreg_s(SYS_TPIDR_EL1);
|
|
|
+ case AMAIR_EL1: return read_sysreg_s(amair_EL12);
|
|
|
+ case CNTKCTL_EL1: return read_sysreg_s(cntkctl_EL12);
|
|
|
+ case PAR_EL1: return read_sysreg_s(SYS_PAR_EL1);
|
|
|
+ case DACR32_EL2: return read_sysreg_s(SYS_DACR32_EL2);
|
|
|
+ case IFSR32_EL2: return read_sysreg_s(SYS_IFSR32_EL2);
|
|
|
+ case DBGVCR32_EL2: return read_sysreg_s(SYS_DBGVCR32_EL2);
|
|
|
+ }
|
|
|
+
|
|
|
+immediate_read:
|
|
|
+ return __vcpu_sys_reg(vcpu, reg);
|
|
|
+}
|
|
|
+
|
|
|
+void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
|
|
|
+{
|
|
|
+ if (!vcpu->arch.sysregs_loaded_on_cpu)
|
|
|
+ goto immediate_write;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * System registers listed in the switch are not restored on every
|
|
|
+ * entry to the guest but are only restored on vcpu_load.
|
|
|
+ *
|
|
|
+ * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
|
|
|
+ * should never be listed below, because the MPIDR should only be
|
|
|
+ * set once, before running the VCPU, and never changed later.
|
|
|
+ */
|
|
|
+ switch (reg) {
|
|
|
+ case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return;
|
|
|
+ case SCTLR_EL1: write_sysreg_s(val, sctlr_EL12); return;
|
|
|
+ case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); return;
|
|
|
+ case CPACR_EL1: write_sysreg_s(val, cpacr_EL12); return;
|
|
|
+ case TTBR0_EL1: write_sysreg_s(val, ttbr0_EL12); return;
|
|
|
+ case TTBR1_EL1: write_sysreg_s(val, ttbr1_EL12); return;
|
|
|
+ case TCR_EL1: write_sysreg_s(val, tcr_EL12); return;
|
|
|
+ case ESR_EL1: write_sysreg_s(val, esr_EL12); return;
|
|
|
+ case AFSR0_EL1: write_sysreg_s(val, afsr0_EL12); return;
|
|
|
+ case AFSR1_EL1: write_sysreg_s(val, afsr1_EL12); return;
|
|
|
+ case FAR_EL1: write_sysreg_s(val, far_EL12); return;
|
|
|
+ case MAIR_EL1: write_sysreg_s(val, mair_EL12); return;
|
|
|
+ case VBAR_EL1: write_sysreg_s(val, vbar_EL12); return;
|
|
|
+ case CONTEXTIDR_EL1: write_sysreg_s(val, contextidr_EL12); return;
|
|
|
+ case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); return;
|
|
|
+ case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); return;
|
|
|
+ case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); return;
|
|
|
+ case AMAIR_EL1: write_sysreg_s(val, amair_EL12); return;
|
|
|
+ case CNTKCTL_EL1: write_sysreg_s(val, cntkctl_EL12); return;
|
|
|
+ case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); return;
|
|
|
+ case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); return;
|
|
|
+ case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); return;
|
|
|
+ case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); return;
|
|
|
+ }
|
|
|
+
|
|
|
+immediate_write:
|
|
|
+ __vcpu_sys_reg(vcpu, reg) = val;
|
|
|
+}
|
|
|
+
|
|
|
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
|
|
|
static u32 cache_levels;
|
|
|
|
|
@@ -121,16 +209,26 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
|
|
|
const struct sys_reg_desc *r)
|
|
|
{
|
|
|
bool was_enabled = vcpu_has_cache_enabled(vcpu);
|
|
|
+ u64 val;
|
|
|
+ int reg = r->reg;
|
|
|
|
|
|
BUG_ON(!p->is_write);
|
|
|
|
|
|
- if (!p->is_aarch32) {
|
|
|
- vcpu_sys_reg(vcpu, r->reg) = p->regval;
|
|
|
+ /* See the 32bit mapping in kvm_host.h */
|
|
|
+ if (p->is_aarch32)
|
|
|
+ reg = r->reg / 2;
|
|
|
+
|
|
|
+ if (!p->is_aarch32 || !p->is_32bit) {
|
|
|
+ val = p->regval;
|
|
|
} else {
|
|
|
- if (!p->is_32bit)
|
|
|
- vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
|
|
|
- vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
|
|
|
+ val = vcpu_read_sys_reg(vcpu, reg);
|
|
|
+ if (r->reg % 2)
|
|
|
+ val = (p->regval << 32) | (u64)lower_32_bits(val);
|
|
|
+ else
|
|
|
+ val = ((u64)upper_32_bits(val) << 32) |
|
|
|
+ lower_32_bits(p->regval);
|
|
|
}
|
|
|
+ vcpu_write_sys_reg(vcpu, val, reg);
|
|
|
|
|
|
kvm_toggle_cache(vcpu, was_enabled);
|
|
|
return true;
|
|
@@ -175,6 +273,14 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
|
|
|
return read_zero(vcpu, p);
|
|
|
}
|
|
|
|
|
|
+static bool trap_undef(struct kvm_vcpu *vcpu,
|
|
|
+ struct sys_reg_params *p,
|
|
|
+ const struct sys_reg_desc *r)
|
|
|
+{
|
|
|
+ kvm_inject_undefined(vcpu);
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
|
|
|
struct sys_reg_params *p,
|
|
|
const struct sys_reg_desc *r)
|
|
@@ -231,10 +337,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
|
|
|
const struct sys_reg_desc *r)
|
|
|
{
|
|
|
if (p->is_write) {
|
|
|
- vcpu_sys_reg(vcpu, r->reg) = p->regval;
|
|
|
+ vcpu_write_sys_reg(vcpu, p->regval, r->reg);
|
|
|
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, r->reg);
|
|
|
+ p->regval = vcpu_read_sys_reg(vcpu, r->reg);
|
|
|
}
|
|
|
|
|
|
trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
|
|
@@ -447,7 +553,8 @@ static void reset_wcr(struct kvm_vcpu *vcpu,
|
|
|
|
|
|
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
|
|
{
|
|
|
- vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
|
|
|
+ u64 amair = read_sysreg(amair_el1);
|
|
|
+ vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
|
|
|
}
|
|
|
|
|
|
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
|
@@ -464,7 +571,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
|
|
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
|
|
|
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
|
|
|
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
|
|
|
- vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
|
|
|
+ vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
|
|
|
}
|
|
|
|
|
|
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
|
@@ -478,12 +585,12 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
|
|
*/
|
|
|
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
|
|
|
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
|
|
|
- vcpu_sys_reg(vcpu, PMCR_EL0) = val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
|
|
|
}
|
|
|
|
|
|
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
|
|
|
{
|
|
|
- u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
|
|
|
+ u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
|
|
|
bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
|
|
|
|
|
|
if (!enabled)
|
|
@@ -525,14 +632,14 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
|
|
|
if (p->is_write) {
|
|
|
/* Only update writeable bits of PMCR */
|
|
|
- val = vcpu_sys_reg(vcpu, PMCR_EL0);
|
|
|
+ val = __vcpu_sys_reg(vcpu, PMCR_EL0);
|
|
|
val &= ~ARMV8_PMU_PMCR_MASK;
|
|
|
val |= p->regval & ARMV8_PMU_PMCR_MASK;
|
|
|
- vcpu_sys_reg(vcpu, PMCR_EL0) = val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
|
|
|
kvm_pmu_handle_pmcr(vcpu, val);
|
|
|
} else {
|
|
|
/* PMCR.P & PMCR.C are RAZ */
|
|
|
- val = vcpu_sys_reg(vcpu, PMCR_EL0)
|
|
|
+ val = __vcpu_sys_reg(vcpu, PMCR_EL0)
|
|
|
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
|
|
|
p->regval = val;
|
|
|
}
|
|
@@ -550,10 +657,10 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
return false;
|
|
|
|
|
|
if (p->is_write)
|
|
|
- vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
|
|
|
+ __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
|
|
|
else
|
|
|
/* return PMSELR.SEL field */
|
|
|
- p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
|
|
|
& ARMV8_PMU_COUNTER_MASK;
|
|
|
|
|
|
return true;
|
|
@@ -586,7 +693,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
|
|
|
{
|
|
|
u64 pmcr, val;
|
|
|
|
|
|
- pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
|
|
|
+ pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
|
|
|
val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
|
|
|
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
|
|
|
kvm_inject_undefined(vcpu);
|
|
@@ -611,7 +718,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
|
|
|
if (pmu_access_event_counter_el0_disabled(vcpu))
|
|
|
return false;
|
|
|
|
|
|
- idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
|
|
|
+ idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
|
|
|
& ARMV8_PMU_COUNTER_MASK;
|
|
|
} else if (r->Op2 == 0) {
|
|
|
/* PMCCNTR_EL0 */
|
|
@@ -666,7 +773,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
|
|
|
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
|
|
|
/* PMXEVTYPER_EL0 */
|
|
|
- idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
|
|
|
+ idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
|
|
|
reg = PMEVTYPER0_EL0 + idx;
|
|
|
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
|
|
|
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
|
|
@@ -684,9 +791,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
|
|
|
if (p->is_write) {
|
|
|
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
|
|
|
- vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
|
|
|
+ __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
|
|
|
}
|
|
|
|
|
|
return true;
|
|
@@ -708,15 +815,15 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
val = p->regval & mask;
|
|
|
if (r->Op2 & 0x1) {
|
|
|
/* accessing PMCNTENSET_EL0 */
|
|
|
- vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
|
|
|
kvm_pmu_enable_counter(vcpu, val);
|
|
|
} else {
|
|
|
/* accessing PMCNTENCLR_EL0 */
|
|
|
- vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
|
|
|
kvm_pmu_disable_counter(vcpu, val);
|
|
|
}
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
|
|
|
}
|
|
|
|
|
|
return true;
|
|
@@ -740,12 +847,12 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
|
|
|
if (r->Op2 & 0x1)
|
|
|
/* accessing PMINTENSET_EL1 */
|
|
|
- vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
|
|
|
else
|
|
|
/* accessing PMINTENCLR_EL1 */
|
|
|
- vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
|
|
|
+ __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
|
|
|
}
|
|
|
|
|
|
return true;
|
|
@@ -765,12 +872,12 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
if (p->is_write) {
|
|
|
if (r->CRm & 0x2)
|
|
|
/* accessing PMOVSSET_EL0 */
|
|
|
- vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
|
|
|
+ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
|
|
|
else
|
|
|
/* accessing PMOVSCLR_EL0 */
|
|
|
- vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
|
|
|
+ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
|
|
|
}
|
|
|
|
|
|
return true;
|
|
@@ -807,10 +914,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
- vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
|
|
|
- & ARMV8_PMU_USERENR_MASK;
|
|
|
+ __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
|
|
|
+ p->regval & ARMV8_PMU_USERENR_MASK;
|
|
|
} else {
|
|
|
- p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
|
|
|
+ p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
|
|
|
& ARMV8_PMU_USERENR_MASK;
|
|
|
}
|
|
|
|
|
@@ -893,6 +1000,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
|
|
|
task_pid_nr(current));
|
|
|
|
|
|
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
|
|
|
+ } else if (id == SYS_ID_AA64MMFR1_EL1) {
|
|
|
+ if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
|
|
|
+ pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n",
|
|
|
+ task_pid_nr(current));
|
|
|
+
|
|
|
+ val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
|
|
|
}
|
|
|
|
|
|
return val;
|
|
@@ -1178,6 +1291,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|
|
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
|
|
|
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
|
|
|
|
|
|
+ { SYS_DESC(SYS_LORSA_EL1), trap_undef },
|
|
|
+ { SYS_DESC(SYS_LOREA_EL1), trap_undef },
|
|
|
+ { SYS_DESC(SYS_LORN_EL1), trap_undef },
|
|
|
+ { SYS_DESC(SYS_LORC_EL1), trap_undef },
|
|
|
+ { SYS_DESC(SYS_LORID_EL1), trap_undef },
|
|
|
+
|
|
|
{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
|
|
|
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
|
|
|
|
|
@@ -1545,6 +1664,11 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|
|
|
|
|
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
|
|
|
|
|
|
+ /* CNTP_TVAL */
|
|
|
+ { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
|
|
|
+ /* CNTP_CTL */
|
|
|
+ { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
|
|
|
+
|
|
|
/* PMEVCNTRn */
|
|
|
PMU_PMEVCNTR(0),
|
|
|
PMU_PMEVCNTR(1),
|
|
@@ -1618,6 +1742,7 @@ static const struct sys_reg_desc cp15_64_regs[] = {
|
|
|
{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
|
|
|
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
|
|
|
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
|
|
|
+ { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
|
|
|
};
|
|
|
|
|
|
/* Target specific emulation tables */
|
|
@@ -2194,7 +2319,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
|
|
|
if (r->get_user)
|
|
|
return (r->get_user)(vcpu, r, reg, uaddr);
|
|
|
|
|
|
- return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
|
|
|
+ return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
|
|
|
}
|
|
|
|
|
|
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
|
@@ -2215,7 +2340,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
|
|
|
if (r->set_user)
|
|
|
return (r->set_user)(vcpu, r, reg, uaddr);
|
|
|
|
|
|
- return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
|
|
|
+ return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
|
|
|
}
|
|
|
|
|
|
static unsigned int num_demux_regs(void)
|
|
@@ -2421,6 +2546,6 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
|
|
|
reset_sys_reg_descs(vcpu, table, num);
|
|
|
|
|
|
for (num = 1; num < NR_SYS_REGS; num++)
|
|
|
- if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
|
|
|
- panic("Didn't reset vcpu_sys_reg(%zi)", num);
|
|
|
+ if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
|
|
|
+ panic("Didn't reset __vcpu_sys_reg(%zi)", num);
|
|
|
}
|