|
|
@@ -17,8 +17,12 @@
|
|
|
#define __ASM_HARDIRQ_H
|
|
|
|
|
|
#include <linux/cache.h>
|
|
|
+#include <linux/percpu.h>
|
|
|
#include <linux/threads.h>
|
|
|
+#include <asm/barrier.h>
|
|
|
#include <asm/irq.h>
|
|
|
+#include <asm/kvm_arm.h>
|
|
|
+#include <asm/sysreg.h>
|
|
|
|
|
|
#define NR_IPI 7
|
|
|
|
|
|
@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
|
|
|
|
|
|
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
|
|
|
|
|
|
+struct nmi_ctx {
|
|
|
+ u64 hcr;
|
|
|
+};
|
|
|
+
|
|
|
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
|
|
|
+
|
|
|
/*
 * NMI entry hook for VHE: when the kernel runs at EL2, snapshot
 * HCR_EL2 into this CPU's nmi_ctx and, if HCR_EL2.TGE is clear,
 * set it before handling the NMI.  The isb() makes the system
 * register write take effect before the handler proceeds.
 * A no-op when the kernel is not in hyp mode.
 */
#define arch_nmi_enter()						\
	do {								\
		if (is_kernel_in_hyp_mode()) {				\
			struct nmi_ctx *___ctx;				\
									\
			___ctx = this_cpu_ptr(&nmi_contexts);		\
			___ctx->hcr = read_sysreg(hcr_el2);		\
			if (!(___ctx->hcr & HCR_TGE)) {			\
				write_sysreg(___ctx->hcr | HCR_TGE,	\
					     hcr_el2);			\
				isb();					\
			}						\
		}							\
	} while (0)
|
|
|
+
|
|
|
/*
 * NMI exit hook for VHE: if arch_nmi_enter() had to set
 * HCR_EL2.TGE (i.e. the saved value had it clear), write the
 * saved HCR_EL2 back.  A no-op otherwise.
 *
 * NOTE(review): no isb() follows the restore here — presumably the
 * subsequent exception return synchronises the write; confirm.
 */
#define arch_nmi_exit()							\
	do {								\
		if (is_kernel_in_hyp_mode()) {				\
			struct nmi_ctx *___ctx;				\
									\
			___ctx = this_cpu_ptr(&nmi_contexts);		\
			if (!(___ctx->hcr & HCR_TGE))			\
				write_sysreg(___ctx->hcr, hcr_el2);	\
		}							\
	} while (0)
|
|
|
+
|
|
|
static inline void ack_bad_irq(unsigned int irq)
|
|
|
{
|
|
|
extern unsigned long irq_err_count;
|