
Merge tag 'kvm-arm-fixes-for-v4.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/ARM Fixes for v4.15, Round 2

Fixes:
 - A bug in our handling of SPE state for non-VHE systems
 - A bug that causes hyp unmapping to run past its valid range and crash the
   system on shutdown
 - Three timer fixes that were introduced as part of the timer optimizations
   for v4.15
Paolo Bonzini, 8 years ago
commit 43aabca38a

arch/arm64/kvm/hyp/debug-sr.c  +3 -0

@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
 	u64 reg;
 
+	/* Clear pmscr in case of early return */
+	*pmscr_el1 = 0;
+
 	/* SPE present on this CPU? */
 	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
 						  ID_AA64DFR0_PMSVER_SHIFT))
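
Note: the hunk above zero-initializes the caller's pmscr_el1 slot before the early return taken on CPUs without SPE, so the restore path cannot replay stale state. A minimal sketch of how a restore path would consume the saved value (the helper name and register accessor are modelled on the non-VHE debug code as an illustration, not copied from the patch):

/*
 * Illustrative sketch only: with *pmscr_el1 cleared on the early
 * return above, a restore written like this becomes a no-op on CPUs
 * without SPE instead of writing back whatever the buffer happened
 * to contain.
 */
static void __hyp_text __restore_spe_sketch(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;			/* nothing was saved */

	/* Re-enable profiling with the saved configuration. */
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}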

include/kvm/arm_arch_timer.h  +1 -1

@@ -62,7 +62,7 @@ struct arch_timer_cpu {
 	bool			enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);

virt/kvm/arm/arch_timer.c  +24 -16

@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
 	struct arch_timer_context *vtimer;
+	u32 cnt_ctl;
 
-	if (!vcpu) {
-		pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-		return IRQ_NONE;
-	}
-	vtimer = vcpu_vtimer(vcpu);
+	/*
+	 * We may see a timer interrupt after vcpu_put() has been called which
+	 * sets the CPU's vcpu pointer to NULL, because even though the timer
+	 * has been disabled in vtimer_save_state(), the hardware interrupt
+	 * signal may not have been retired from the interrupt controller yet.
+	 */
+	if (!vcpu)
+		return IRQ_HANDLED;
 
+	vtimer = vcpu_vtimer(vcpu);
 	if (!vtimer->irq.level) {
-		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-		if (kvm_timer_irq_can_fire(vtimer))
+		cnt_ctl = read_sysreg_el0(cntv_ctl);
+		cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+			   ARCH_TIMER_CTRL_IT_MASK;
+		if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
 			kvm_timer_update_irq(vcpu, true, vtimer);
 	}
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
+	isb();
 
 	vtimer->loaded = false;
 out:
@@ -720,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
 	return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
 	struct arch_timer_kvm_info *info;
 	int err;
@@ -756,10 +764,13 @@ int kvm_timer_hyp_init(void)
 		return err;
 	}
 
-	err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-	if (err) {
-		kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-		goto out_free_irq;
+	if (has_gic) {
+		err = irq_set_vcpu_affinity(host_vtimer_irq,
+					    kvm_get_running_vcpus());
+		if (err) {
+			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+			goto out_free_irq;
+		}
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -835,10 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_timer_vcpu_load_user(vcpu);
-	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+	kvm_timer_vcpu_load(vcpu);
 	preempt_enable();
 
 	return 0;
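
Note: the new handler logic in the first hunk above treats the virtual timer as having genuinely fired only when the freshly read CNTV_CTL shows it enabled, asserting its status bit, and unmasked. A standalone restatement of that predicate (the helper name is an illustration, not part of the patch):

/*
 * Illustrative helper, not part of the patch: mirrors the bit test in
 * kvm_arch_timer_handler() -- the timer has fired iff it is enabled,
 * ISTATUS is set, and IMASK is clear.
 */
static bool vtimer_ctl_is_firing(u32 cnt_ctl)
{
	cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
		   ARCH_TIMER_CTRL_IT_MASK;

	return cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT);
}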

virt/kvm/arm/arm.c  +1 -1

@@ -1326,7 +1326,7 @@ static int init_subsystems(void)
 	/*
 	 * Init HYP architected timer support
 	 */
-	err = kvm_timer_hyp_init();
+	err = kvm_timer_hyp_init(vgic_present);
 	if (err)
 		goto out;
 

virt/kvm/arm/mmu.c  +4 -6

@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-	unsigned long addr;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
 	if (hyp_pgd) {
 		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+				(uintptr_t)high_memory - PAGE_OFFSET);
+		unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+				VMALLOC_END - VMALLOC_START);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;