@@ -1972,30 +1972,38 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, unsigned long len)
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+				  void *data, int offset, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
+	gpa_t gpa = ghc->gpa + offset;
 
-	BUG_ON(len > ghc->len);
+	BUG_ON(len + offset > ghc->len);
 
 	if (slots->generation != ghc->generation)
 		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_write_guest(kvm, ghc->gpa, data, len);
+		return kvm_write_guest(kvm, gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
-	r = __copy_to_user((void __user *)ghc->hva, data, len);
+	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
 	if (r)
 		return -EFAULT;
-	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+	mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
 
 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,