@@ -28,7 +28,11 @@ struct vsie_page {
          * the same offset as that in struct sie_page!
          */
         struct mcck_volatile_info mcck_info;    /* 0x0200 */
-        /* the pinned originial scb */
+        /*
+         * The pinned original scb. Be aware that other VCPUs can modify
+         * it while we read from it. Values that are used for conditions or
+         * are reused conditionally, should be accessed via READ_ONCE.
+         */
         struct kvm_s390_sie_block *scb_o;       /* 0x0218 */
         /* the shadow gmap in use by the vsie_page */
         struct gmap *gmap;                      /* 0x0220 */
@@ -140,12 +144,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
         struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
         struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
-        u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
+        const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
+        const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
         unsigned long *b1, *b2;
         u8 ecb3_flags;
 
         scb_s->crycbd = 0;
-        if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
+        if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
                 return 0;
         /* format-1 is supported with message-security-assist extension 3 */
         if (!test_kvm_facility(vcpu->kvm, 76))
@@ -183,12 +188,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
         struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
         struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+        /* READ_ONCE does not work on bitfields - use a temporary variable */
+        const uint32_t __new_ibc = scb_o->ibc;
+        const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
         __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 
         scb_s->ibc = 0;
         /* ibc installed in g2 and requested for g3 */
-        if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
-                scb_s->ibc = scb_o->ibc & 0x0fffU;
+        if (vcpu->kvm->arch.model.ibc && new_ibc) {
+                scb_s->ibc = new_ibc;
                 /* takte care of the minimum ibc level of the machine */
                 if (scb_s->ibc < min_ibc)
                         scb_s->ibc = min_ibc;
@@ -253,6 +261,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
         struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
         struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+        /* READ_ONCE does not work on bitfields - use a temporary variable */
+        const uint32_t __new_prefix = scb_o->prefix;
+        const uint32_t new_prefix = READ_ONCE(__new_prefix);
+        const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
         bool had_tx = scb_s->ecb & ECB_TE;
         unsigned long new_mso = 0;
         int rc;
@@ -299,14 +311,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         scb_s->icpua = scb_o->icpua;
 
         if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
-                new_mso = scb_o->mso & 0xfffffffffff00000UL;
+                new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
         /* if the hva of the prefix changes, we have to remap the prefix */
-        if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
+        if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
                 prefix_unmapped(vsie_page);
         /* SIE will do mso/msl validity and exception checks for us */
         scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
         scb_s->mso = new_mso;
-        scb_s->prefix = scb_o->prefix;
+        scb_s->prefix = new_prefix;
 
         /* We have to definetly flush the tlb if this scb never ran */
         if (scb_s->ihcpu != 0xffffU)
@@ -318,11 +330,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
                 scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
         /* transactional execution */
-        if (test_kvm_facility(vcpu->kvm, 73)) {
+        if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
                 /* remap the prefix is tx is toggled on */
-                if ((scb_o->ecb & ECB_TE) && !had_tx)
+                if (!had_tx)
                         prefix_unmapped(vsie_page);
-                scb_s->ecb |= scb_o->ecb & ECB_TE;
+                scb_s->ecb |= ECB_TE;
         }
         /* SIMD */
         if (test_kvm_facility(vcpu->kvm, 129)) {
@@ -529,9 +541,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         gpa_t gpa;
         int rc = 0;
 
-        gpa = scb_o->scaol & ~0xfUL;
+        gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
         if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
-                gpa |= (u64) scb_o->scaoh << 32;
+                gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
         if (gpa) {
                 if (!(gpa & ~0x1fffUL))
                         rc = set_validity_icpt(scb_s, 0x0038U);
@@ -551,7 +563,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                 scb_s->scaol = (u32)(u64)hpa;
         }
 
-        gpa = scb_o->itdba & ~0xffUL;
+        gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
         if (gpa && (scb_s->ecb & ECB_TE)) {
                 if (!(gpa & ~0x1fffU)) {
                         rc = set_validity_icpt(scb_s, 0x0080U);
@@ -566,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                 scb_s->itdba = hpa;
         }
 
-        gpa = scb_o->gvrd & ~0x1ffUL;
+        gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
         if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
                 if (!(gpa & ~0x1fffUL)) {
                         rc = set_validity_icpt(scb_s, 0x1310U);
@@ -584,7 +596,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                 scb_s->gvrd = hpa;
         }
 
-        gpa = scb_o->riccbd & ~0x3fUL;
+        gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
         if (gpa && (scb_s->ecb3 & ECB3_RI)) {
                 if (!(gpa & ~0x1fffUL)) {
                         rc = set_validity_icpt(scb_s, 0x0043U);
@@ -602,8 +614,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
                 unsigned long sdnxc;
 
-                gpa = scb_o->sdnxo & ~0xfUL;
-                sdnxc = scb_o->sdnxo & 0xfUL;
+                gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
+                sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
                 if (!gpa || !(gpa & ~0x1fffUL)) {
                         rc = set_validity_icpt(scb_s, 0x10b0U);
                         goto unpin;
@@ -768,7 +780,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page)
 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
         struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
-        __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
+        __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
 
         if (fac && test_kvm_facility(vcpu->kvm, 7)) {
                 retry_vsie_icpt(vsie_page);
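
Note on the bitfield handling above: the kernel's READ_ONCE() takes the address of its argument and reads it through a volatile-qualified pointer, and C does not allow taking the address of a bitfield, which is why prepare_ibc() and shadow_scb() first copy scb_o->ibc and scb_o->prefix into plain locals and apply READ_ONCE() to the copies. The user-space sketch below only illustrates that pattern; the READ_ONCE() macro here is a simplified stand-in and struct guest_block is a made-up example, not the kernel's SIE block layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): read the object
 * exactly once through a volatile-qualified pointer to it. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical control block with a bitfield, for illustration only. */
struct guest_block {
        unsigned int ibc : 12;  /* bitfield: its address cannot be taken */
        uint32_t prefix;        /* ordinary field: READ_ONCE() applies directly */
};

int main(void)
{
        struct guest_block g = { .ibc = 0x123, .prefix = 0x2000 };

        /* READ_ONCE(g.ibc) would not compile, because &g.ibc is illegal for a
         * bitfield. Copy into an ordinary local first, then apply the macro to
         * the copy, as the patch does for ibc and prefix. */
        const uint32_t __new_ibc = g.ibc;
        const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;

        /* Non-bitfield members can be read once directly. */
        const uint32_t new_prefix = READ_ONCE(g.prefix);

        printf("ibc=%#x prefix=%#x\n", (unsigned int)new_ibc,
               (unsigned int)new_prefix);
        return 0;
}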