@@ -211,8 +211,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 			     struct kvm_s390_pgm_info *pgm_info);
 
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	WARN_ON(!mutex_is_locked(&kvm->lock));
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_unblock(vcpu);
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu