
Merge tag 'kvm-s390-next-20150209' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: fixes and features for kvm/next (3.20)

1. Fixes
- Fix user triggerable endless loop
- reenable LPP facility
- disable KVM compat ioctl on s390 (untested and broken)

2. cpu models for s390
- provide facilities and instruction blocking per VM
- add s390 specific vm attributes for setting values

3. crypto
- toleration patch for z13 support

4. add uuid and long name to /proc/sysinfo (stsi 322)
- patch Acked by Heiko Carstens (touches non-kvm s390 code)
Paolo Bonzini 10 years ago
parent
commit
ccd9e785ea

+ 45 - 0
Documentation/virtual/kvm/devices/vm.txt

@@ -38,3 +38,48 @@ Allows userspace to query the actual limit and set a new limit for
 the maximum guest memory size. The limit will be rounded up to
 2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
 the number of page table levels.
+
+2. GROUP: KVM_S390_VM_CPU_MODEL
+Architectures: s390
+
+2.1. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE (r/o)
+
+Allows user space to retrieve machine and kvm specific cpu related information:
+
+struct kvm_s390_vm_cpu_machine {
+       __u64 cpuid;           # CPUID of host
+       __u32 ibc;             # IBC level range offered by host
+       __u8  pad[4];
+       __u64 fac_mask[256];   # set of cpu facilities enabled by KVM
+       __u64 fac_list[256];   # set of cpu facilities offered by host
+}
+
+Parameters: address of buffer to store the machine related cpu data
+            of type struct kvm_s390_vm_cpu_machine*
+Returns:    -EFAULT if the given address is not accessible from kernel space
+	    -ENOMEM if not enough memory is available to process the ioctl
+	    0 in case of success
+
+2.2. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR (r/w)
+
+Allows user space to retrieve or request to change cpu related information for a vcpu:
+
+struct kvm_s390_vm_cpu_processor {
+       __u64 cpuid;           # CPUID currently (to be) used by this vcpu
+       __u16 ibc;             # IBC level currently (to be) used by this vcpu
+       __u8  pad[6];
+       __u64 fac_list[256];   # set of cpu facilities currently (to be) used
+                              # by this vcpu
+}
+
+KVM does not enforce or limit the cpu model data in any form. Take the information
+retrieved by means of KVM_S390_VM_CPU_MACHINE as a hint for reasonable configuration
+setups. Instruction interceptions triggered by additionally set facility bits that
+are not handled by KVM need to be implemented in the VM driver code.
+
+Parameters: address of buffer to store/set the processor related cpu
+	    data of type struct kvm_s390_vm_cpu_processor*.
+Returns:    -EBUSY in case 1 or more vcpus are already activated (only in write case)
+	    -EFAULT if the given address is not accessible from kernel space
+	    -ENOMEM if not enough memory is available to process the ioctl
+	    0 in case of success
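
As a usage illustration (not part of this patch set): once userspace holds a VM file descriptor, the r/o machine attribute is reachable through the generic KVM_HAS_DEVICE_ATTR / KVM_GET_DEVICE_ATTR vm ioctls. The helper name and the vm_fd parameter below are hypothetical; the structure is the one added to arch/s390/include/uapi/asm/kvm.h further down.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: read host cpuid, IBC range and facility lists. */
static int query_cpu_machine(int vm_fd, struct kvm_s390_vm_cpu_machine *machine)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE,
		.addr  = (__u64)(unsigned long)machine,
	};

	memset(machine, 0, sizeof(*machine));
	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* attribute not offered by this kernel */
	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}

A matching KVM_SET_DEVICE_ATTR sketch for the r/w processor attribute follows the arch/s390/kvm/kvm-s390.c diff below.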

+ 26 - 1
arch/s390/include/asm/kvm_host.h

@@ -89,7 +89,8 @@ struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
 	__u32 : 1;			/* 0x0004 */
 	__u32 prefix : 18;
-	__u32 : 13;
+	__u32 : 1;
+	__u32 ibc : 12;
 	__u8	reserved08[4];		/* 0x0008 */
 #define PROG_IN_SIE (1<<0)
 	__u32	prog0c;			/* 0x000c */
@@ -163,6 +164,7 @@ struct kvm_s390_sie_block {
 	__u64	tecmc;			/* 0x00e8 */
 	__u8	reservedf0[12];		/* 0x00f0 */
 #define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
 	__u32	crycbd;			/* 0x00fc */
 	__u64	gcr[16];		/* 0x0100 */
 	__u64	gbea;			/* 0x0180 */
@@ -505,6 +507,27 @@ struct s390_io_adapter {
 #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
 #define MAX_S390_ADAPTER_MAPS 256
 
+/* maximum size of facilities and facility mask is 2k bytes */
+#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
+#define S390_ARCH_FAC_LIST_SIZE_U64 \
+	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
+#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
+#define S390_ARCH_FAC_MASK_SIZE_U64 \
+	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
+
+struct s390_model_fac {
+	/* facilities used in SIE context */
+	__u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
+	/* subset enabled by kvm */
+	__u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+};
+
+struct kvm_s390_cpu_model {
+	struct s390_model_fac *fac;
+	struct cpuid cpu_id;
+	unsigned short ibc;
+};
+
 struct kvm_s390_crypto {
 	struct kvm_s390_crypto_cb *crycb;
 	__u32 crycbd;
@@ -516,6 +539,7 @@ struct kvm_s390_crypto_cb {
 	__u8    reserved00[72];                 /* 0x0000 */
 	__u8    dea_wrapping_key_mask[24];      /* 0x0048 */
 	__u8    aes_wrapping_key_mask[32];      /* 0x0060 */
+	__u8    reserved80[128];                /* 0x0080 */
 };
 
 struct kvm_arch{
@@ -534,6 +558,7 @@ struct kvm_arch{
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
+	struct kvm_s390_cpu_model model;
 	struct kvm_s390_crypto crypto;
 	u64 epoch;
 };

+ 7 - 3
arch/s390/include/asm/sysinfo.h

@@ -15,6 +15,7 @@
 #define __ASM_S390_SYSINFO_H
 
 #include <asm/bitsperlong.h>
+#include <linux/uuid.h>
 
 struct sysinfo_1_1_1 {
 	unsigned char p:1;
@@ -112,10 +113,13 @@ struct sysinfo_3_2_2 {
 		char name[8];
 		unsigned int caf;
 		char cpi[16];
-		char reserved_1[24];
-
+		char reserved_1[3];
+		char ext_name_encoding;
+		unsigned int reserved_2;
+		uuid_be uuid;
 	} vm[8];
-	char reserved_544[3552];
+	char reserved_3[1504];
+	char ext_names[8][256];
 };
 
 extern int topology_max_mnest;

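A quick size check for the sysinfo_3_2_2 hunk above (illustrative only, assuming a 4-byte unsigned int and a 16-byte uuid_be): the new fields replace reserved bytes one for one, so the overall structure layout stays unchanged.

#include <assert.h>

/* 3 (reserved_1) + 1 (ext_name_encoding) + 4 (reserved_2) + 16 (uuid) == old reserved_1[24] */
static_assert(3 + 1 + 4 + 16 == 24, "per-VM entry keeps its size");
/* 1504 (reserved_3) + 8 * 256 (ext_names) == old reserved_544[3552] */
static_assert(1504 + 8 * 256 == 3552, "trailing reserved area keeps its size");
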
+ 21 - 0
arch/s390/include/uapi/asm/kvm.h

@@ -59,6 +59,7 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_MEM_CTRL		0
 #define KVM_S390_VM_TOD			1
 #define KVM_S390_VM_CRYPTO		2
+#define KVM_S390_VM_CPU_MODEL		3
 
 /* kvm attributes for mem_ctrl */
 #define KVM_S390_VM_MEM_ENABLE_CMMA	0
@@ -69,6 +70,26 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_TOD_LOW		0
 #define KVM_S390_VM_TOD_HIGH		1
 
+/* kvm attributes for KVM_S390_VM_CPU_MODEL */
+/* processor related attributes are r/w */
+#define KVM_S390_VM_CPU_PROCESSOR	0
+struct kvm_s390_vm_cpu_processor {
+	__u64 cpuid;
+	__u16 ibc;
+	__u8  pad[6];
+	__u64 fac_list[256];
+};
+
+/* machine related attributes are r/o */
+#define KVM_S390_VM_CPU_MACHINE		1
+struct kvm_s390_vm_cpu_machine {
+	__u64 cpuid;
+	__u32 ibc;
+	__u8  pad[4];
+	__u64 fac_mask[256];
+	__u64 fac_list[256];
+};
+
 /* kvm attributes for crypto */
 #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW	0
 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW	1

+ 29 - 0
arch/s390/kernel/sysinfo.c

@@ -196,6 +196,33 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
 	seq_printf(m, "LPAR CPUs Shared:     %d\n", info->cpus_shared);
 }
 
+static void print_ext_name(struct seq_file *m, int lvl,
+			   struct sysinfo_3_2_2 *info)
+{
+	if (info->vm[lvl].ext_name_encoding == 0)
+		return;
+	if (info->ext_names[lvl][0] == 0)
+		return;
+	switch (info->vm[lvl].ext_name_encoding) {
+	case 1: /* EBCDIC */
+		EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
+		break;
+	case 2:	/* UTF-8 */
+		break;
+	default:
+		return;
+	}
+	seq_printf(m, "VM%02d Extended Name:   %-.256s\n", lvl,
+		   info->ext_names[lvl]);
+}
+
+static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
+{
+	if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+		return;
+	seq_printf(m, "VM%02d UUID:            %pUb\n", i, &info->vm[i].uuid);
+}
+
 static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
 {
 	int i;
@@ -213,6 +240,8 @@ static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
 		seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
 		seq_printf(m, "VM%02d CPUs Standby:    %d\n", i, info->vm[i].cpus_standby);
 		seq_printf(m, "VM%02d CPUs Reserved:   %d\n", i, info->vm[i].cpus_reserved);
+		print_ext_name(m, i, info);
+		print_uuid(m, i, info);
 	}
 }
 

+ 2 - 2
arch/s390/kvm/gaccess.c

@@ -357,8 +357,8 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	union asce asce;
 
 	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
-	edat1 = ctlreg0.edat && test_vfacility(8);
-	edat2 = edat1 && test_vfacility(78);
+	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
+	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
 	asce.val = get_vcpu_asce(vcpu);
 	if (asce.r)
 		goto real_address;

+ 2 - 0
arch/s390/kvm/interrupt.c

@@ -1244,6 +1244,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 		list_add_tail(&inti->list, &iter->list);
 	}
 	atomic_set(&fi->active, 1);
+	if (atomic_read(&kvm->online_vcpus) == 0)
+		goto unlock_fi;
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
 	if (sigcpu == KVM_MAX_VCPUS) {
 		do {

+ 237 - 36
arch/s390/kvm/kvm-s390.c

@@ -30,7 +30,6 @@
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
-#include <asm/facility.h>
 #include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -100,15 +99,20 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
-unsigned long *vfacilities;
-static struct gmap_notifier gmap_notifier;
+/* upper facilities limit for kvm */
+unsigned long kvm_s390_fac_list_mask[] = {
+	0xff82fffbf4fc2000UL,
+	0x005c000000000000UL,
+};
 
-/* test availability of vfacility */
-int test_vfacility(unsigned long nr)
+unsigned long kvm_s390_fac_list_mask_size(void)
 {
-	return __test_facility(nr, (void *) vfacilities);
+	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
+	return ARRAY_SIZE(kvm_s390_fac_list_mask);
 }
 
+static struct gmap_notifier gmap_notifier;
+
 /* Section: not file related */
 int kvm_arch_hardware_enable(void)
 {
@@ -351,7 +355,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	if (!test_vfacility(76))
+	if (!test_kvm_facility(kvm, 76))
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
@@ -498,6 +502,106 @@ static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	return ret;
 }
 
+static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_s390_vm_cpu_processor *proc;
+	int ret = 0;
+
+	mutex_lock(&kvm->lock);
+	if (atomic_read(&kvm->online_vcpus)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	if (!proc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (!copy_from_user(proc, (void __user *)attr->addr,
+			    sizeof(*proc))) {
+		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
+		       sizeof(struct cpuid));
+		kvm->arch.model.ibc = proc->ibc;
+		memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+		       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	} else
+		ret = -EFAULT;
+	kfree(proc);
+out:
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_CPU_PROCESSOR:
+		ret = kvm_s390_set_processor(kvm, attr);
+		break;
+	}
+	return ret;
+}
+
+static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_s390_vm_cpu_processor *proc;
+	int ret = 0;
+
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	if (!proc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
+	proc->ibc = kvm->arch.model.ibc;
+	memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
+		ret = -EFAULT;
+	kfree(proc);
+out:
+	return ret;
+}
+
+static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_s390_vm_cpu_machine *mach;
+	int ret = 0;
+
+	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+	if (!mach) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	get_cpu_id((struct cpuid *) &mach->cpuid);
+	mach->ibc = sclp_get_ibc();
+	memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
+	       kvm_s390_fac_list_mask_size() * sizeof(u64));
+	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_U64);
+	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+		ret = -EFAULT;
+	kfree(mach);
+out:
+	return ret;
+}
+
+static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_CPU_PROCESSOR:
+		ret = kvm_s390_get_processor(kvm, attr);
+		break;
+	case KVM_S390_VM_CPU_MACHINE:
+		ret = kvm_s390_get_machine(kvm, attr);
+		break;
+	}
+	return ret;
+}
+
 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret;
@@ -509,6 +613,9 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_TOD:
 		ret = kvm_s390_set_tod(kvm, attr);
 		break;
+	case KVM_S390_VM_CPU_MODEL:
+		ret = kvm_s390_set_cpu_model(kvm, attr);
+		break;
 	case KVM_S390_VM_CRYPTO:
 		ret = kvm_s390_vm_set_crypto(kvm, attr);
 		break;
@@ -531,6 +638,9 @@ static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_TOD:
 		ret = kvm_s390_get_tod(kvm, attr);
 		break;
+	case KVM_S390_VM_CPU_MODEL:
+		ret = kvm_s390_get_cpu_model(kvm, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -567,6 +677,17 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 			break;
 		}
 		break;
+	case KVM_S390_VM_CPU_MODEL:
+		switch (attr->attr) {
+		case KVM_S390_VM_CPU_PROCESSOR:
+		case KVM_S390_VM_CPU_MACHINE:
+			ret = 0;
+			break;
+		default:
+			ret = -ENXIO;
+			break;
+		}
+		break;
 	case KVM_S390_VM_CRYPTO:
 		switch (attr->attr) {
 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
@@ -654,9 +775,61 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return r;
 }
 
+static int kvm_s390_query_ap_config(u8 *config)
+{
+	u32 fcn_code = 0x04000000UL;
+	u32 cc;
+
+	asm volatile(
+		"lgr 0,%1\n"
+		"lgr 2,%2\n"
+		".long 0xb2af0000\n"		/* PQAP(QCI) */
+		"ipm %0\n"
+		"srl %0,28\n"
+		: "=r" (cc)
+		: "r" (fcn_code), "r" (config)
+		: "cc", "0", "2", "memory"
+	);
+
+	return cc;
+}
+
+static int kvm_s390_apxa_installed(void)
+{
+	u8 config[128];
+	int cc;
+
+	if (test_facility(2) && test_facility(12)) {
+		cc = kvm_s390_query_ap_config(config);
+
+		if (cc)
+			pr_err("PQAP(QCI) failed with cc=%d", cc);
+		else
+			return config[0] & 0x40;
+	}
+
+	return 0;
+}
+
+static void kvm_s390_set_crycb_format(struct kvm *kvm)
+{
+	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+
+	if (kvm_s390_apxa_installed())
+		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
+	else
+		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
+}
+
+static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
+{
+	get_cpu_id(cpu_id);
+	cpu_id->version = 0xff;
+}
+
 static int kvm_s390_crypto_init(struct kvm *kvm)
 {
-	if (!test_vfacility(76))
+	if (!test_kvm_facility(kvm, 76))
 		return 0;
 
 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
@@ -664,8 +837,7 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 	if (!kvm->arch.crypto.crycb)
 		return -ENOMEM;
 
-	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
-				  CRYCB_FORMAT1;
+	kvm_s390_set_crycb_format(kvm);
 
 	/* Disable AES/DEA protected key functions by default */
 	kvm->arch.crypto.aes_kw = 0;
@@ -676,7 +848,7 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int rc;
+	int i, rc;
 	char debug_name[16];
 	static unsigned long sca_offset;
 
@@ -711,6 +883,46 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.dbf)
 		goto out_nodbf;
 
+	/*
+	 * The architectural maximum amount of facilities is 16 kbit. To store
+	 * this amount, 2 kbyte of memory is required. Thus we need a full
+	 * page to hold the active copy (arch.model.fac->sie) and the current
+	 * facilities set (arch.model.fac->kvm). Its address size has to be
+	 * 31 bits and word aligned.
+	 */
+	kvm->arch.model.fac =
+		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!kvm->arch.model.fac)
+		goto out_nofac;
+
+	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_U64);
+
+	/*
+	 * If this KVM host runs *not* in a LPAR, relax the facility bits
+	 * of the kvm facility mask by all missing facilities. This will allow
+	 * to determine the right CPU model by means of the remaining facilities.
+	 * Live guest migration must prohibit the migration of KVMs running in
+	 * a LPAR to non LPAR hosts.
+	 */
+	if (!MACHINE_IS_LPAR)
+		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
+			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
+
+	/*
+	 * Apply the kvm facility mask to limit the kvm supported/tolerated
+	 * facility list.
+	 */
+	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+		if (i < kvm_s390_fac_list_mask_size())
+			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+		else
+			kvm->arch.model.fac->kvm[i] = 0UL;
+	}
+
+	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
+	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
+
 	if (kvm_s390_crypto_init(kvm) < 0)
 		goto out_crypto;
 
@@ -742,6 +954,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_nogmap:
 	kfree(kvm->arch.crypto.crycb);
 out_crypto:
+	free_page((unsigned long)kvm->arch.model.fac);
+out_nofac:
 	debug_unregister(kvm->arch.dbf);
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
@@ -794,6 +1008,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
+	free_page((unsigned long)kvm->arch.model.fac);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
 	kfree(kvm->arch.crypto.crycb);
@@ -889,7 +1104,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
-	if (!test_vfacility(76))
+	if (!test_kvm_facility(vcpu->kvm, 76))
 		return;
 
 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
@@ -928,7 +1143,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_STOPPED |
 						    CPUSTAT_GED);
 	vcpu->arch.sie_block->ecb   = 6;
-	if (test_vfacility(50) && test_vfacility(73))
+	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
 		vcpu->arch.sie_block->ecb |= 0x10;
 
 	vcpu->arch.sie_block->ecb2  = 8;
@@ -937,7 +1152,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= 1;
 	if (sclp_has_sigpif())
 		vcpu->arch.sie_block->eca |= 0x10000000U;
-	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
 				      ICTL_TPROT;
 
@@ -948,8 +1162,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	}
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
-	get_cpu_id(&vcpu->arch.cpu_id);
-	vcpu->arch.cpu_id.version = 0xff;
+
+	mutex_lock(&vcpu->kvm->lock);
+	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
+	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
+	mutex_unlock(&vcpu->kvm->lock);
 
 	kvm_s390_vcpu_crypto_setup(vcpu);
 
@@ -993,6 +1212,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 	}
+	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -2058,30 +2278,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 static int __init kvm_s390_init(void)
 {
-	int ret;
-	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-	if (ret)
-		return ret;
-
-	/*
-	 * guests can ask for up to 255+1 double words, we need a full page
-	 * to hold the maximum amount of facilities. On the other hand, we
-	 * only set facilities that are known to work in KVM.
-	 */
-	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
-	if (!vfacilities) {
-		kvm_exit();
-		return -ENOMEM;
-	}
-	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
-	vfacilities[0] &= 0xff82fffbf47c2000UL;
-	vfacilities[1] &= 0x005c000000000000UL;
-	return 0;
+	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
 
 static void __exit kvm_s390_exit(void)
 {
-	free_page((unsigned long) vfacilities);
 	kvm_exit();
 }
 

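To complement the query sketch shown after the vm.txt hunk, a minimal (hypothetical) userspace helper for the r/w processor attribute could look as follows. kvm_s390_set_processor() above rejects the request with -EBUSY once any vcpu exists, so this has to run before the first KVM_CREATE_VCPU.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: install a (possibly reduced) cpu model for the VM. */
static int set_cpu_processor(int vm_fd, const struct kvm_s390_vm_cpu_processor *proc)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_PROCESSOR,
		.addr  = (__u64)(unsigned long)proc,
	};

	/* must be issued before any KVM_CREATE_VCPU, else the kernel returns -EBUSY */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
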
+ 9 - 4
arch/s390/kvm/kvm-s390.h

@@ -18,12 +18,10 @@
 #include <linux/hrtimer.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <asm/facility.h>
 
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
-/* declare vfacilities extern */
-extern unsigned long *vfacilities;
-
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
 #define TDB_FORMAT1		1
@@ -127,6 +125,12 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
 	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
 }
 
+/* test availability of a facility in a kvm instance */
+static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
+{
+	return __test_facility(nr, kvm->arch.model.fac->kvm);
+}
+
 /* are cpu states controlled by user space */
 static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
 {
@@ -183,7 +187,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 /* is cmma enabled */
 bool kvm_s390_cmma_enabled(struct kvm *kvm);
-int test_vfacility(unsigned long nr);
+unsigned long kvm_s390_fac_list_mask_size(void);
+extern unsigned long kvm_s390_fac_list_mask[];
 
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

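A note on the new test_kvm_facility() helper above: __test_facility() treats the per-VM facility list as an STFLE-style bit string, in which facility bit 0 is the most significant bit of byte 0. The standalone sketch below (illustrative only, not the kernel helper itself) shows that indexing; facility 76, which guards the crypto setup in kvm-s390.c, maps to byte 9, bit 4.

/* Sketch of the STFLE-style bit lookup performed by __test_facility(). */
static int facility_bit_set(unsigned long nr, const void *fac_list)
{
	const unsigned char *byte = (const unsigned char *)fac_list + (nr >> 3);

	return (*byte & (0x80 >> (nr & 7))) != 0;	/* bit 0 = MSB of byte 0 */
}
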
+ 9 - 4
arch/s390/kvm/priv.c

@@ -337,19 +337,24 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 static int handle_stfl(struct kvm_vcpu *vcpu)
 {
 	int rc;
+	unsigned int fac;
 
 	vcpu->stat.instruction_stfl++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
+	/*
+	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
+	 * into a u32 memory representation. They will remain bits 0-31.
+	 */
+	fac = *vcpu->kvm->arch.model.fac->sie >> 32;
 	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
-			    vfacilities, 4);
+			    &fac, sizeof(fac));
 	if (rc)
 		return rc;
-	VCPU_EVENT(vcpu, 5, "store facility list value %x",
-		   *(unsigned int *) vfacilities);
-	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
+	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+	trace_kvm_s390_handle_stfl(vcpu, fac);
 	return 0;
 }
 

+ 4 - 0
virt/kvm/Kconfig

@@ -43,3 +43,7 @@ config HAVE_KVM_ARCH_TLB_FLUSH_ALL
 
 config KVM_GENERIC_DIRTYLOG_READ_PROTECT
        bool
+
+config KVM_COMPAT
+       def_bool y
+       depends on COMPAT && !S390

+ 6 - 6
virt/kvm/kvm_main.c

@@ -92,7 +92,7 @@ struct dentry *kvm_debugfs_dir;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
 				  unsigned long arg);
 #endif
@@ -2052,7 +2052,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 	.compat_ioctl   = kvm_vcpu_compat_ioctl,
 #endif
 	.mmap           = kvm_vcpu_mmap,
@@ -2342,7 +2342,7 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *filp,
 				  unsigned int ioctl, unsigned long arg)
 {
@@ -2434,7 +2434,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
 	.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 	.compat_ioctl = kvm_device_ioctl,
 #endif
 	.release = kvm_device_release,
@@ -2721,7 +2721,7 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 struct compat_kvm_dirty_log {
 	__u32 slot;
 	__u32 padding1;
@@ -2768,7 +2768,7 @@ out:
 static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_KVM_COMPAT
 	.compat_ioctl   = kvm_vm_compat_ioctl,
 #endif
 	.llseek		= noop_llseek,