
Merge tag 'kvm-arm-for-3.15-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

First round of KVM/ARM Fixes for 3.15

Includes vgic fixes, a fix for possible kernel corruption due to
misalignment of pages, and disabling of KVM in Kconfig on big-endian
systems, because KVM on big-endian hosts currently breaks the build.
Paolo Bonzini 11 years ago
parent
commit
a5a5aef451
3 changed files with 18 additions and 14 deletions
  1. arch/arm/kvm/Kconfig (+1 -1)
  2. arch/arm/kvm/mmu.c (+9 -6)
  3. virt/kvm/arm/vgic.c (+8 -7)

+ 1 - 1
arch/arm/kvm/Kconfig

@@ -23,7 +23,7 @@ config KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
-	depends on ARM_VIRT_EXT && ARM_LPAE
+	depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
 	---help---
 	  Support hosting virtualized guest machines. You will also
 	  need to select one or more of the processor modules below.

+ 9 - 6
arch/arm/kvm/mmu.c

@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;

+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))

 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
 	if (boot_hyp_pgd) {
 		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		kfree(boot_hyp_pgd);
+		free_pages((unsigned long)boot_hyp_pgd, pgd_order);
 		boot_hyp_pgd = NULL;
 	}

 	if (hyp_pgd)
 		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

-	kfree(init_bounce_page);
+	free_page((unsigned long)init_bounce_page);
 	init_bounce_page = NULL;

 	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
 			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

-		kfree(hyp_pgd);
+		free_pages((unsigned long)hyp_pgd, pgd_order);
 		hyp_pgd = NULL;
 	}

@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
 		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
 		phys_addr_t phys_base;

-		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
 		if (!init_bounce_page) {
 			kvm_err("Couldn't allocate HYP init bounce page\n");
 			err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
 			 (unsigned long)phys_base);
 	}

-	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
 	if (!hyp_pgd || !boot_hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
 		err = -ENOMEM;
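
Why this hunk matters: kmalloc()/kzalloc() only guarantee a small minimum alignment, so a PAGE_SIZE buffer can straddle a page boundary (for example with slab debugging enabled), and a PGD allocated this way may not meet the hardware's natural-alignment requirement. __get_free_pages() hands out whole, page-aligned pages, and the frees are switched to free_page()/free_pages() to match the new allocator. A rough userspace analogue of the same pitfall (a hypothetical demo, not kernel code; malloc/aligned_alloc stand in for kmalloc/__get_free_page):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
    	/* malloc(), like kmalloc(), promises only a small minimum
    	 * alignment, so a page-sized buffer can cross a page boundary. */
    	void *buf = malloc(PAGE_SIZE);
    	uintptr_t start = (uintptr_t)buf;
    	uintptr_t end = start + PAGE_SIZE - 1;
    	if ((start & ~(PAGE_SIZE - 1)) != (end & ~(PAGE_SIZE - 1)))
    		printf("page-sized buffer straddles a page boundary\n");

    	/* aligned_alloc() plays the role of __get_free_page() here:
    	 * the block starts exactly on a page boundary. */
    	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
    	printf("aligned block at %p\n", page);

    	free(buf);
    	free(page);
    	return 0;
    }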

+ 8 - 7
virt/kvm/arm/vgic.c

@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	u32 val;
 	u32 *reg;

-	offset >>= 1;
 	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-				  vcpu->vcpu_id, offset);
+				  vcpu->vcpu_id, offset >> 1);

-	if (offset & 2)
+	if (offset & 4)
 		val = *reg >> 16;
 	else
 		val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	vgic_reg_access(mmio, &val, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
 	if (mmio->is_write) {
-		if (offset < 4) {
+		if (offset < 8) {
 			*reg = ~0U; /* Force PPIs/SGIs to 1 */
 			return false;
 		}

 		val = vgic_cfg_compress(val);
-		if (offset & 2) {
+		if (offset & 4) {
 			*reg &= 0xffff;
 			*reg |= val << 16;
 		} else {
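
For context on the offset arithmetic (a sketch of the reasoning, not the kernel source): GICD_ICFGR uses 2 configuration bits per interrupt, so each 32-bit MMIO register covers 16 interrupts, while the vgic's internal irq_cfg store uses 1 bit per interrupt, so each internal u32 covers 32 interrupts (two MMIO registers). The old code shifted offset in place and then tested bits of the already-shifted value; the fix keeps offset in MMIO byte units throughout: offset >> 1 converts to the 1-bit-per-IRQ layout for the lookup, offset & 4 selects the half of the internal word, and offset < 8 covers the two registers holding the 32 private interrupts (SGIs/PPIs). A tiny standalone check of the fixed decode (decode() is a hypothetical helper):

    #include <assert.h>

    /* For a byte offset into the ICFGR range: which internal u32,
     * and which 16-bit half of it? Mirrors the fixed arithmetic. */
    static void decode(unsigned int offset, unsigned int *word, int *high)
    {
    	*word = (offset >> 1) / 4;   /* offset >> 1: 2 cfg bits -> 1 bit  */
    	*high = !!(offset & 4);      /* odd MMIO reg -> high half of word */
    }

    int main(void)
    {
    	unsigned int word;
    	int high;

    	decode(0, &word, &high); assert(word == 0 && !high); /* IRQs  0-15 */
    	decode(4, &word, &high); assert(word == 0 &&  high); /* IRQs 16-31 */
    	decode(8, &word, &high); assert(word == 1 && !high); /* IRQs 32-47 */
    	return 0;
    }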
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 	case 0:
 		if (!target_cpus)
 			return;
+		break;

 	case 1:
 		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
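
The added break fixes an unintended switch fall-through: with target filter 0 (send the SGI to the CPUs listed in the register), control previously fell into case 1 and replaced the caller-supplied target_cpus with the "all but self" mask. A minimal standalone illustration of this bug class (the values are illustrative, not the vgic code):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int target_cpus = 0x5; /* caller-specified CPU mask */
    	int filter = 0;

    	switch (filter) {
    	case 0:
    		if (!target_cpus)
    			return 0;
    		/* without a break here, execution falls through ... */
    	case 1:
    		target_cpus = 0xfe;     /* ... and clobbers the mask  */
    	}
    	printf("target_cpus = 0x%x\n", target_cpus); /* 0xfe, not 0x5 */
    	return 0;
    }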
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
 	if (addr + size < addr)
 		return -EINVAL;

+	*ioaddr = addr;
 	ret = vgic_ioaddr_overlap(kvm);
 	if (ret)
-		return ret;
-	*ioaddr = addr;
+		*ioaddr = VGIC_ADDR_UNDEF;
+
 	return ret;
 }
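
The reordering in this last hunk is deliberate: vgic_ioaddr_overlap() inspects the addresses already recorded in the kvm struct, so the candidate base has to be stored in *ioaddr before the check can see it, and on failure the slot is reset to VGIC_ADDR_UNDEF rather than left stale. The same commit-then-roll-back pattern in a self-contained sketch (all names and sizes here are illustrative, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_UNDEF ((uint64_t)-1)

    struct dev { uint64_t dist_base, cpu_base; };

    /* Validation reads the committed state, as vgic_ioaddr_overlap() does. */
    static int overlaps(const struct dev *d)
    {
    	if (d->dist_base == ADDR_UNDEF || d->cpu_base == ADDR_UNDEF)
    		return 0;
    	return d->dist_base < d->cpu_base + 0x1000 &&
    	       d->cpu_base < d->dist_base + 0x1000;
    }

    static int assign(struct dev *d, uint64_t *slot, uint64_t addr)
    {
    	*slot = addr;                 /* commit first, so the check sees it */
    	if (overlaps(d)) {
    		*slot = ADDR_UNDEF;   /* roll back on failure */
    		return -1;
    	}
    	return 0;
    }

    int main(void)
    {
    	struct dev d = { ADDR_UNDEF, ADDR_UNDEF };

    	assign(&d, &d.dist_base, 0x8000000);
    	int ret = assign(&d, &d.cpu_base, 0x8000800); /* overlaps: rejected */
    	printf("ret=%d cpu_base=%llx\n", ret, (unsigned long long)d.cpu_base);
    	return 0;
    }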