@@ -360,78 +360,6 @@ struct kvm_vcpu_arch {
         u8 msa_enabled;
 };
 
-
-#define kvm_read_c0_guest_index(cop0)           (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val)     (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0)        (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_write_c0_guest_entrylo0(cop0, val)  (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
-#define kvm_read_c0_guest_entrylo1(cop0)        (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_write_c0_guest_entrylo1(cop0, val)  (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
-#define kvm_read_c0_guest_context(cop0)         (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val)   (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
-#define kvm_read_c0_guest_pagemask(cop0)        (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val)  (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0)           (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val)     (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_hwrena(cop0)          (cop0->reg[MIPS_CP0_HWRENA][0])
-#define kvm_write_c0_guest_hwrena(cop0, val)    (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0)        (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val)  (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0)           (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val)     (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0)         (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val)   (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0)         (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val)   (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0)          (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val)    (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0)          (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val)    (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0)           (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val)     (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0)             (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val)       (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0)            (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val)      (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0)           (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val)     (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0)          (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0)         (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0)         (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0)         (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config4(cop0)         (cop0->reg[MIPS_CP0_CONFIG][4])
-#define kvm_read_c0_guest_config5(cop0)         (cop0->reg[MIPS_CP0_CONFIG][5])
-#define kvm_read_c0_guest_config7(cop0)         (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val)    (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config4(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
-#define kvm_write_c0_guest_config5(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
-#define kvm_write_c0_guest_config7(cop0, val)   (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0)        (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val)  (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-#define kvm_read_c0_guest_kscratch1(cop0)       (cop0->reg[MIPS_CP0_DESAVE][2])
-#define kvm_read_c0_guest_kscratch2(cop0)       (cop0->reg[MIPS_CP0_DESAVE][3])
-#define kvm_read_c0_guest_kscratch3(cop0)       (cop0->reg[MIPS_CP0_DESAVE][4])
-#define kvm_read_c0_guest_kscratch4(cop0)       (cop0->reg[MIPS_CP0_DESAVE][5])
-#define kvm_read_c0_guest_kscratch5(cop0)       (cop0->reg[MIPS_CP0_DESAVE][6])
-#define kvm_read_c0_guest_kscratch6(cop0)       (cop0->reg[MIPS_CP0_DESAVE][7])
-#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
-#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
-#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
-#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
-#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
-#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
-
-/*
- * Some of the guest registers may be modified asynchronously (e.g. from a
- * hrtimer callback in hard irq context) and therefore need stronger atomicity
- * guarantees than other registers.
- */
-
 static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
                                                 unsigned long val)
 {
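
The bodies of the _kvm_atomic_*_c0_guest_reg() helpers are not part of this hunk; only their signatures and the retry tail ("} while (unlikely(!temp));", visible as context in the next hunk) appear here. They exist because, as the removed comment above notes, some guest registers (notably Cause) can be modified asynchronously from a hrtimer callback in hardirq context. As a rough sketch of the semantics only (the in-tree helpers use MIPS ll/sc-style inline assembly, and the function name below is made up for illustration), an equivalent read-modify-write retry loop using compiler builtins looks like this:

/*
 * Illustrative sketch only, not the kernel implementation: retry an OR
 * into *reg until the update lands atomically, so bits set concurrently
 * (e.g. by the hrtimer callback) cannot be lost.
 */
static inline void example_atomic_set_c0_guest_reg(unsigned long *reg,
                                                   unsigned long val)
{
        unsigned long old, new;

        do {
                old = __atomic_load_n(reg, __ATOMIC_RELAXED);
                new = old | val;        /* set the requested bits */
        } while (!__atomic_compare_exchange_n(reg, &old, new, false,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
}
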
@@ -482,26 +410,265 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
         } while (unlikely(!temp));
 }
 
-#define kvm_set_c0_guest_status(cop0, val)   (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+/* Guest register types, used in accessor build below */
+#define __KVMT32        u32
+#define __KVMTl         unsigned long
 
-/* Cause can be modified asynchronously from hardirq hrtimer callback */
-#define kvm_set_c0_guest_cause(cop0, val) \
-        _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_clear_c0_guest_cause(cop0, val) \
-        _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_change_c0_guest_cause(cop0, change, val) \
-        _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
-                                        change, val)
-
-#define kvm_set_c0_guest_ebase(cop0, val)   (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
+/*
+ * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
+ * These operate on the saved guest C0 state in RAM.
+ */
+
+/* Generate saved context simple accessors */
+#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        return cop0->reg[(_reg)][(sel)]; \
+} \
+static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] = val; \
+}
+
+/* Generate saved context bitwise modifiers */
+#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] |= val; \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        cop0->reg[(_reg)][(sel)] &= ~val; \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
+{ \
+        unsigned long _mask = mask; \
+        cop0->reg[(_reg)][(sel)] &= ~_mask; \
+        cop0->reg[(_reg)][(sel)] |= val & _mask; \
+}
+
+/* Generate saved context atomic bitwise modifiers */
+#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
 { \
-        kvm_clear_c0_guest_ebase(cop0, change); \
-        kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+        _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
+                                        val); \
 }
 
+/*
+ * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
+ * These operate on the VZ guest C0 context in hardware.
+ */
+
+/* Generate VZ guest context simple accessors */
+#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        return read_gc0_##name(); \
+} \
+static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        write_gc0_##name(val); \
+}
+
+/* Generate VZ guest context bitwise modifiers */
+#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
+                                         __KVMT##type val) \
+{ \
+        set_gc0_##name(val); \
+} \
+static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
+                                           __KVMT##type val) \
+{ \
+        clear_gc0_##name(val); \
+} \
+static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
+                                            __KVMT##type mask, \
+                                            __KVMT##type val) \
+{ \
+        change_gc0_##name(mask, val); \
+}
+
+/* Generate VZ guest context save/restore to/from saved context */
+#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
+static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
+} \
+static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
+{ \
+        cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
+}
+
+/*
+ * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
+ * These wrap a set of operations to provide them with a different name.
+ */
+
+/* Generate simple accessor wrapper */
+#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
+static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
+{ \
+        return kvm_read_##name2(cop0); \
+} \
+static inline void kvm_write_##name1(struct mips_coproc *cop0, \
+                                     __KVMT##type val) \
+{ \
+        kvm_write_##name2(cop0, val); \
+}
+
+/* Generate bitwise modifier wrapper */
+#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
+static inline void kvm_set_##name1(struct mips_coproc *cop0, \
+                                   __KVMT##type val) \
+{ \
+        kvm_set_##name2(cop0, val); \
+} \
+static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
+                                     __KVMT##type val) \
+{ \
+        kvm_clear_##name2(cop0, val); \
+} \
+static inline void kvm_change_##name1(struct mips_coproc *cop0, \
+                                      __KVMT##type mask, \
+                                      __KVMT##type val) \
+{ \
+        kvm_change_##name2(cop0, mask, val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
+ * These generate accessors operating on the saved context in RAM, and wrap them
+ * with the common guest C0 accessors (for use by common emulation code).
+ */
+
+#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
+        __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
+        __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
+        __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#ifndef CONFIG_KVM_MIPS_VZ
+
+/*
+ * T&E (trap & emulate software based virtualisation)
+ * We generate the common accessors operating exclusively on the saved context
+ * in RAM.
+ */
+
+#define __BUILD_KVM_RW_HW       __BUILD_KVM_RW_SW
+#define __BUILD_KVM_SET_HW      __BUILD_KVM_SET_SW
+#define __BUILD_KVM_ATOMIC_HW   __BUILD_KVM_ATOMIC_SW
+
+#else
+
+/*
+ * VZ (hardware assisted virtualisation)
+ * These macros use the active guest state in VZ mode (hardware registers),
+ */
+
+/*
+ * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
+ * These generate accessors operating on the VZ guest context in hardware, and
+ * wrap them with the common guest C0 accessors (for use by common emulation
+ * code).
+ *
+ * Accessors operating on the saved context in RAM are also generated to allow
+ * convenient explicit saving and restoring of the state.
+ */
+
+#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
+        __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+        __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
+        __BUILD_KVM_SAVE_VZ(name, _reg, sel)
+
+#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
+        __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+        __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+        __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
+
+/*
+ * We can't do atomic modifications of COP0 state if hardware can modify it.
+ * Races must be handled explicitly.
+ */
+#define __BUILD_KVM_ATOMIC_HW   __BUILD_KVM_SET_HW
+
+#endif
+
+/*
+ * Define accessors for CP0 registers that are accessible to the guest. These
+ * are primarily used by common emulation code, which may need to access the
+ * registers differently depending on the implementation.
+ *
+ *      fns_hw/sw         name        type  reg num               select
+ */
+__BUILD_KVM_RW_HW(index,      32, MIPS_CP0_TLB_INDEX,    0)
+__BUILD_KVM_RW_HW(entrylo0,    l, MIPS_CP0_TLB_LO0,      0)
+__BUILD_KVM_RW_HW(entrylo1,    l, MIPS_CP0_TLB_LO1,      0)
+__BUILD_KVM_RW_HW(context,     l, MIPS_CP0_TLB_CONTEXT,  0)
+__BUILD_KVM_RW_HW(userlocal,   l, MIPS_CP0_TLB_CONTEXT,  2)
+__BUILD_KVM_RW_HW(pagemask,    l, MIPS_CP0_TLB_PG_MASK,  0)
+__BUILD_KVM_RW_HW(pagegrain,  32, MIPS_CP0_TLB_PG_MASK,  1)
+__BUILD_KVM_RW_HW(wired,      32, MIPS_CP0_TLB_WIRED,    0)
+__BUILD_KVM_RW_HW(hwrena,     32, MIPS_CP0_HWRENA,       0)
+__BUILD_KVM_RW_HW(badvaddr,    l, MIPS_CP0_BAD_VADDR,    0)
+__BUILD_KVM_RW_SW(count,      32, MIPS_CP0_COUNT,        0)
+__BUILD_KVM_RW_HW(entryhi,     l, MIPS_CP0_TLB_HI,       0)
+__BUILD_KVM_RW_HW(compare,    32, MIPS_CP0_COMPARE,      0)
+__BUILD_KVM_RW_HW(status,     32, MIPS_CP0_STATUS,       0)
+__BUILD_KVM_RW_HW(intctl,     32, MIPS_CP0_STATUS,       1)
+__BUILD_KVM_RW_HW(cause,      32, MIPS_CP0_CAUSE,        0)
+__BUILD_KVM_RW_HW(epc,         l, MIPS_CP0_EXC_PC,       0)
+__BUILD_KVM_RW_SW(prid,       32, MIPS_CP0_PRID,         0)
+__BUILD_KVM_RW_HW(ebase,       l, MIPS_CP0_PRID,         1)
+__BUILD_KVM_RW_HW(config,     32, MIPS_CP0_CONFIG,       0)
+__BUILD_KVM_RW_HW(config1,    32, MIPS_CP0_CONFIG,       1)
+__BUILD_KVM_RW_HW(config2,    32, MIPS_CP0_CONFIG,       2)
+__BUILD_KVM_RW_HW(config3,    32, MIPS_CP0_CONFIG,       3)
+__BUILD_KVM_RW_HW(config4,    32, MIPS_CP0_CONFIG,       4)
+__BUILD_KVM_RW_HW(config5,    32, MIPS_CP0_CONFIG,       5)
+__BUILD_KVM_RW_HW(config6,    32, MIPS_CP0_CONFIG,       6)
+__BUILD_KVM_RW_HW(config7,    32, MIPS_CP0_CONFIG,       7)
+__BUILD_KVM_RW_HW(errorepc,    l, MIPS_CP0_ERROR_PC,     0)
+__BUILD_KVM_RW_HW(kscratch1,   l, MIPS_CP0_DESAVE,       2)
+__BUILD_KVM_RW_HW(kscratch2,   l, MIPS_CP0_DESAVE,       3)
+__BUILD_KVM_RW_HW(kscratch3,   l, MIPS_CP0_DESAVE,       4)
+__BUILD_KVM_RW_HW(kscratch4,   l, MIPS_CP0_DESAVE,       5)
+__BUILD_KVM_RW_HW(kscratch5,   l, MIPS_CP0_DESAVE,       6)
+__BUILD_KVM_RW_HW(kscratch6,   l, MIPS_CP0_DESAVE,       7)
+
+/* Bitwise operations (on HW state) */
+__BUILD_KVM_SET_HW(status,    32, MIPS_CP0_STATUS,       0)
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+__BUILD_KVM_ATOMIC_HW(cause,  32, MIPS_CP0_CAUSE,        0)
+__BUILD_KVM_SET_HW(ebase,      l, MIPS_CP0_PRID,         1)
+
 /* Helpers */
 
 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
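
To make the generated API concrete, here is a hand expansion of one invocation from the table above. Count is declared with __BUILD_KVM_RW_SW, so its accessors always operate on the saved context in RAM, and the wrapped kvm_read/write_c0_guest_count() names match the macros removed earlier in this patch, so callers of the common accessors are unchanged. This is an illustrative expansion of the macros above, not extra code added by the patch:

/* Expansion of __BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0);
 * __KVMT32 is u32, so both accessors take/return u32.
 */

/* From __BUILD_KVM_RW_SAVED(count, 32, MIPS_CP0_COUNT, 0): saved copy in RAM */
static inline u32 kvm_read_sw_gc0_count(struct mips_coproc *cop0)
{
        return cop0->reg[MIPS_CP0_COUNT][0];
}
static inline void kvm_write_sw_gc0_count(struct mips_coproc *cop0, u32 val)
{
        cop0->reg[MIPS_CP0_COUNT][0] = val;
}

/* From __BUILD_KVM_RW_WRAP(c0_guest_count, sw_gc0_count, 32): common names */
static inline u32 kvm_read_c0_guest_count(struct mips_coproc *cop0)
{
        return kvm_read_sw_gc0_count(cop0);
}
static inline void kvm_write_c0_guest_count(struct mips_coproc *cop0, u32 val)
{
        kvm_write_sw_gc0_count(cop0, val);
}
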
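Under CONFIG_KVM_MIPS_VZ the same table produces more per register: __BUILD_KVM_RW_HW additionally generates accessors for the hardware guest context and save/restore helpers between hardware and RAM. Below is a hand expansion of __BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0) from the macros above; read_gc0_entryhi()/write_gc0_entryhi() are the low-level guest CP0 accessors this header assumes are provided elsewhere (they are not defined in this patch):

/* Expansion of __BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
 * when CONFIG_KVM_MIPS_VZ is enabled (__KVMTl is unsigned long).
 */

/* __BUILD_KVM_RW_SAVED: the saved copy in RAM */
static inline unsigned long kvm_read_sw_gc0_entryhi(struct mips_coproc *cop0)
{
        return cop0->reg[MIPS_CP0_TLB_HI][0];
}
static inline void kvm_write_sw_gc0_entryhi(struct mips_coproc *cop0,
                                            unsigned long val)
{
        cop0->reg[MIPS_CP0_TLB_HI][0] = val;
}

/* __BUILD_KVM_RW_VZ: the live hardware guest register */
static inline unsigned long kvm_read_vz_gc0_entryhi(struct mips_coproc *cop0)
{
        return read_gc0_entryhi();
}
static inline void kvm_write_vz_gc0_entryhi(struct mips_coproc *cop0,
                                            unsigned long val)
{
        write_gc0_entryhi(val);
}

/* __BUILD_KVM_RW_WRAP: the common accessors route to the hardware copy */
static inline unsigned long kvm_read_c0_guest_entryhi(struct mips_coproc *cop0)
{
        return kvm_read_vz_gc0_entryhi(cop0);
}
static inline void kvm_write_c0_guest_entryhi(struct mips_coproc *cop0,
                                              unsigned long val)
{
        kvm_write_vz_gc0_entryhi(cop0, val);
}

/* __BUILD_KVM_SAVE_VZ: move state between hardware and RAM */
static inline void kvm_restore_gc0_entryhi(struct mips_coproc *cop0)
{
        write_gc0_entryhi(cop0->reg[MIPS_CP0_TLB_HI][0]);
}
static inline void kvm_save_gc0_entryhi(struct mips_coproc *cop0)
{
        cop0->reg[MIPS_CP0_TLB_HI][0] = read_gc0_entryhi();
}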