@@ -130,7 +130,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_ARCH_BASE 8
 
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
-	BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
+	BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
 	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
 })
 #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
@@ -224,7 +224,7 @@ struct kvm_vcpu {
 	int vcpu_id;
 	int srcu_idx;
 	int mode;
-	unsigned long requests;
+	u64 requests;
 	unsigned long guest_debug;
 
 	int pre_pcpu;
@@ -1131,7 +1131,7 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 	 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
 	 */
 	smp_wmb();
-	set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }
 
 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
@@ -1141,12 +1141,12 @@ static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
 {
-	return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }
 
 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
 {
-	clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }
 
 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)