
Merge tag 'kvm-s390-20140530' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

Several minor fixes and cleanups for KVM:
1. Fix flag check for gdb support
2. Remove unnecessary vcpu start
3. Remove code duplication for sigp interrupts
4. Better DAT handling for the TPROT instruction
5. Correct addressing exception for standby memory
Paolo Bonzini
commit 146b2cfe0c

+ 1 - 0
arch/s390/include/asm/kvm_host.h

@@ -110,6 +110,7 @@ struct kvm_s390_sie_block {
 #define ICTL_ISKE	0x00004000
 #define ICTL_SSKE	0x00002000
 #define ICTL_RRBE	0x00001000
+#define ICTL_TPROT	0x00000200
 	__u32	ictl;			/* 0x0048 */
 	__u32	eca;			/* 0x004c */
 #define ICPT_INST	0x04

+ 55 - 2
arch/s390/kvm/gaccess.c

@@ -292,7 +292,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 		wake_up(&vcpu->kvm->arch.ipte_wq);
 }
 
-static void ipte_lock(struct kvm_vcpu *vcpu)
+void ipte_lock(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.sie_block->eca & 1)
 		ipte_lock_siif(vcpu);
@@ -300,7 +300,7 @@ static void ipte_lock(struct kvm_vcpu *vcpu)
 		ipte_lock_simple(vcpu);
 }
 
-static void ipte_unlock(struct kvm_vcpu *vcpu)
+void ipte_unlock(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.sie_block->eca & 1)
 		ipte_unlock_siif(vcpu);
@@ -644,6 +644,59 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 	return rc;
 }
 
+/**
+ * guest_translate_address - translate guest logical into guest absolute address
+ *
+ * Parameter semantics are the same as the ones from guest_translate.
+ * The memory contents at the guest address are not changed.
+ *
+ * Note: The IPTE lock is not taken during this function, so the caller
+ * has to take care of this.
+ */
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+			    unsigned long *gpa, int write)
+{
+	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct trans_exc_code_bits *tec;
+	union asce asce;
+	int rc;
+
+	/* Access register mode is not supported yet. */
+	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
+		return -EOPNOTSUPP;
+
+	gva = kvm_s390_logical_to_effective(vcpu, gva);
+	memset(pgm, 0, sizeof(*pgm));
+	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+	tec->as = psw_bits(*psw).as;
+	tec->fsi = write ? FSI_STORE : FSI_FETCH;
+	tec->addr = gva >> PAGE_SHIFT;
+	if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+		if (write) {
+			rc = pgm->code = PGM_PROTECTION;
+			return rc;
+		}
+	}
+
+	asce.val = get_vcpu_asce(vcpu);
+	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
+		rc = guest_translate(vcpu, gva, gpa, write);
+		if (rc > 0) {
+			if (rc == PGM_PROTECTION)
+				tec->b61 = 1;
+			pgm->code = rc;
+		}
+	} else {
+		rc = 0;
+		*gpa = kvm_s390_real_to_abs(vcpu, gva);
+		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
+			rc = pgm->code = PGM_ADDRESSING;
+	}
+
+	return rc;
+}
+
 /**
  * kvm_s390_check_low_addr_protection - check for low-address protection
  * @ga: Guest address
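The kerneldoc for guest_translate_address() above notes that IPTE locking is left to the caller and that program interruption codes come back as positive return values. A minimal caller sketch (hypothetical helper, modeled on the handle_tprot() rework in priv.c further down):

/* Hypothetical sketch: probe whether a guest logical address is writable.
 * Positive return values from guest_translate_address() are program
 * interruption codes; negative values are internal errors. */
static int probe_guest_write(struct kvm_vcpu *vcpu, unsigned long gva)
{
	unsigned long gpa;
	int rc;

	/* the helper does not take the IPTE lock itself */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	rc = guest_translate_address(vcpu, gva, &gpa, 1);	/* 1 == write */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);

	if (rc > 0)		/* e.g. PGM_PROTECTION, PGM_ADDRESSING */
		return kvm_s390_inject_program_int(vcpu, rc);
	return rc;		/* 0 on success, < 0 on internal error */
}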

+ 5 - 0
arch/s390/kvm/gaccess.h

@@ -155,6 +155,9 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
 
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+			    unsigned long *gpa, int write);
+
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
 		 unsigned long len, int write);
 
@@ -324,6 +327,8 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 	return access_guest_real(vcpu, gra, data, len, 0);
 }
 
+void ipte_lock(struct kvm_vcpu *vcpu);
+void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
 

+ 0 - 1
arch/s390/kvm/interrupt.c

@@ -442,7 +442,6 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
 				    &vcpu->arch.sie_block->gpsw,
 				    sizeof(psw_t));
-		kvm_s390_vcpu_start(vcpu);
 		break;
 	case KVM_S390_PROGRAM_INT:
 		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",

+ 4 - 2
arch/s390/kvm/kvm-s390.c

@@ -637,7 +637,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	if (sclp_has_siif())
 		vcpu->arch.sie_block->eca |= 1;
 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
-	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
+				      ICTL_TPROT;
+
 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
 		if (rc)
@@ -950,7 +952,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	vcpu->guest_debug = 0;
 	kvm_s390_clear_bp_data(vcpu);
 
-	if (vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS)
+	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
 		return -EINVAL;
 
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
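The flag-check fix matters: vcpu->guest_debug is zeroed two lines before the check, so the old test of vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS always evaluated to 0 and the -EINVAL path was unreachable, silently accepting invalid user-supplied flags. A condensed sketch of the corrected order of operations (simplified from the function above):

	vcpu->guest_debug = 0;				/* reset old state */
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)	/* validate user input */
		return -EINVAL;				/* now reachable */

	if (dbg->control & KVM_GUESTDBG_ENABLE)		/* ...then apply it */
		vcpu->guest_debug = dbg->control;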

+ 33 - 23
arch/s390/kvm/priv.c

@@ -930,8 +930,9 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 static int handle_tprot(struct kvm_vcpu *vcpu)
 {
 	u64 address1, address2;
-	struct vm_area_struct *vma;
-	unsigned long user_address;
+	unsigned long hva, gpa;
+	int ret = 0, cc = 0;
+	bool writable;
 
 	vcpu->stat.instruction_tprot++;
 
@@ -942,32 +943,41 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 
 	/* we only handle the Linux memory detection case:
 	 * access key == 0
-	 * guest DAT == off
 	 * everything else goes to userspace. */
 	if (address2 & 0xf0)
 		return -EOPNOTSUPP;
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
-		return -EOPNOTSUPP;
-
-	down_read(&current->mm->mmap_sem);
-	user_address = __gmap_translate(address1, vcpu->arch.gmap);
-	if (IS_ERR_VALUE(user_address))
-		goto out_inject;
-	vma = find_vma(current->mm, user_address);
-	if (!vma)
-		goto out_inject;
-	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
-	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
-		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
-	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
-		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
-
-	up_read(&current->mm->mmap_sem);
-	return 0;
+		ipte_lock(vcpu);
+	ret = guest_translate_address(vcpu, address1, &gpa, 1);
+	if (ret == PGM_PROTECTION) {
+		/* Write protected? Try again with read-only... */
+		cc = 1;
+		ret = guest_translate_address(vcpu, address1, &gpa, 0);
+	}
+	if (ret) {
+		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
+			ret = kvm_s390_inject_program_int(vcpu, ret);
+		} else if (ret > 0) {
+			/* Translation not available */
+			kvm_s390_set_psw_cc(vcpu, 3);
+			ret = 0;
+		}
+		goto out_unlock;
+	}
 
-out_inject:
-	up_read(&current->mm->mmap_sem);
-	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
+	if (kvm_is_error_hva(hva)) {
+		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	} else {
+		if (!writable)
+			cc = 1;		/* Write not permitted ==> read-only */
+		kvm_s390_set_psw_cc(vcpu, cc);
+		/* Note: CC2 only occurs for storage keys (not supported yet) */
+	}
+out_unlock:
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+		ipte_unlock(vcpu);
+	return ret;
 }
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
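For context, a sketch of how TPROT's architected condition codes map onto the rework above (cc 2 would need storage-key checking, which is why the comment marks it as not yet possible here):

/* TPROT condition codes as realized in handle_tprot() (sketch):
 *   cc 0: fetch and store permitted    -> write probe succeeded
 *   cc 1: fetch permitted, store not   -> PGM_PROTECTION on the write probe,
 *                                         or a read-only host mapping
 *   cc 2: neither fetch nor store      -> storage keys, not supported yet
 *   cc 3: translation not available    -> kvm_s390_set_psw_cc(vcpu, 3)
 */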

+ 18 - 38
arch/s390/kvm/sigp.c

@@ -54,33 +54,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_local_interrupt *li;
-	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_interrupt s390int = {
+		.type = KVM_S390_INT_EMERGENCY,
+		.parm = vcpu->vcpu_id,
+	};
 	struct kvm_vcpu *dst_vcpu = NULL;
+	int rc = 0;
 
 	if (cpu_addr < KVM_MAX_VCPUS)
 		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-	if (!inti)
-		return -ENOMEM;
-
-	inti->type = KVM_S390_INT_EMERGENCY;
-	inti->emerg.code = vcpu->vcpu_id;
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+	if (!rc)
+		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
 
-	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
-	list_add_tail(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	spin_unlock_bh(&li->lock);
-	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
-
-	return SIGP_CC_ORDER_CODE_ACCEPTED;
+	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -116,33 +106,23 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct kvm_s390_local_interrupt *li;
-	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_interrupt s390int = {
+		.type = KVM_S390_INT_EXTERNAL_CALL,
+		.parm = vcpu->vcpu_id,
+	};
 	struct kvm_vcpu *dst_vcpu = NULL;
+	int rc;
 
 	if (cpu_addr < KVM_MAX_VCPUS)
 		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
-	if (!inti)
-		return -ENOMEM;
-
-	inti->type = KVM_S390_INT_EXTERNAL_CALL;
-	inti->extcall.code = vcpu->vcpu_id;
-
-	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
-	list_add_tail(&inti->list, &li->list);
-	atomic_set(&li->active, 1);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	spin_unlock_bh(&li->lock);
-	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+	if (!rc)
+		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
 
-	return SIGP_CC_ORDER_CODE_ACCEPTED;
+	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
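Both converted orders now follow one shape: fill in a struct kvm_s390_interrupt and let kvm_s390_inject_vcpu() handle the allocation, queueing, and wakeup that used to be open-coded per order. A hypothetical common helper illustrating that shared pattern (not part of this series):

/* Hypothetical sketch only: the shape both SIGP handlers now share. */
static int __sigp_inject_simple(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 type)
{
	struct kvm_s390_interrupt s390int = {
		.type = type,		/* KVM_S390_INT_EMERGENCY or
					 * KVM_S390_INT_EXTERNAL_CALL */
		.parm = vcpu->vcpu_id,	/* address of the sending cpu */
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}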