@@ -6240,6 +6240,83 @@ e_free:
 	return ret;
 }
 
+static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+				  unsigned long __user vaddr,
+				  unsigned long dst_paddr,
+				  unsigned long __user dst_vaddr,
+				  int size, int *error)
+{
+	struct page *src_tpage = NULL;
+	struct page *dst_tpage = NULL;
+	int ret, len = size;
+
+	/* If source buffer is not aligned then use an intermediate buffer */
+	if (!IS_ALIGNED(vaddr, 16)) {
+		src_tpage = alloc_page(GFP_KERNEL);
+		if (!src_tpage)
+			return -ENOMEM;
+
+		if (copy_from_user(page_address(src_tpage),
+				   (void __user *)(uintptr_t)vaddr, size)) {
+			__free_page(src_tpage);
+			return -EFAULT;
+		}
+
+		paddr = __sme_page_pa(src_tpage);
+	}
+
+	/*
+	 * If destination buffer or length is not aligned then do read-modify-write:
+	 *  - decrypt destination in an intermediate buffer
+	 *  - copy the source buffer in an intermediate buffer
+	 *  - use the intermediate buffer as source buffer
+	 */
+	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+		int dst_offset;
+
+		dst_tpage = alloc_page(GFP_KERNEL);
+		if (!dst_tpage) {
+			ret = -ENOMEM;
+			goto e_free;
+		}
+
+		ret = __sev_dbg_decrypt(kvm, dst_paddr,
+					__sme_page_pa(dst_tpage), size, error);
+		if (ret)
+			goto e_free;
+
+		/*
+		 * If source is kernel buffer then use memcpy() otherwise
+		 * copy_from_user().
+		 */
+		dst_offset = dst_paddr & 15;
+
+		if (src_tpage)
+			memcpy(page_address(dst_tpage) + dst_offset,
+			       page_address(src_tpage), size);
+		else {
+			if (copy_from_user(page_address(dst_tpage) + dst_offset,
+					   (void __user *)(uintptr_t)vaddr, size)) {
+				ret = -EFAULT;
+				goto e_free;
+			}
+		}
+
+		paddr = __sme_page_pa(dst_tpage);
+		dst_paddr = round_down(dst_paddr, 16);
+		len = round_up(size, 16);
+	}
+
+	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
+
+e_free:
+	if (src_tpage)
+		__free_page(src_tpage);
+	if (dst_tpage)
+		__free_page(dst_tpage);
+	return ret;
+}
+
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
 	unsigned long vaddr, vaddr_end, next_vaddr;
@@ -6292,11 +6369,19 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 		d_off = dst_vaddr & ~PAGE_MASK;
 		len = min_t(size_t, (PAGE_SIZE - s_off), size);
 
-		ret = __sev_dbg_decrypt_user(kvm,
-					     __sme_page_pa(src_p[0]) + s_off,
-					     dst_vaddr,
-					     __sme_page_pa(dst_p[0]) + d_off,
-					     len, &argp->error);
+		if (dec)
+			ret = __sev_dbg_decrypt_user(kvm,
+						     __sme_page_pa(src_p[0]) + s_off,
+						     dst_vaddr,
+						     __sme_page_pa(dst_p[0]) + d_off,
+						     len, &argp->error);
+		else
+			ret = __sev_dbg_encrypt_user(kvm,
+						     __sme_page_pa(src_p[0]) + s_off,
+						     vaddr,
+						     __sme_page_pa(dst_p[0]) + d_off,
+						     dst_vaddr,
+						     len, &argp->error);
 
 		sev_unpin_memory(kvm, src_p, 1);
 		sev_unpin_memory(kvm, dst_p, 1);
@@ -6347,6 +6432,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 	case KVM_SEV_DBG_DECRYPT:
 		r = sev_dbg_crypt(kvm, &sev_cmd, true);
 		break;
+	case KVM_SEV_DBG_ENCRYPT:
+		r = sev_dbg_crypt(kvm, &sev_cmd, false);
+		break;
 	default:
 		r = -EINVAL;
 		goto out;
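
For reference, a minimal sketch of how userspace would drive the new command through the existing KVM_MEMORY_ENCRYPT_OP ioctl, assuming the struct kvm_sev_cmd / struct kvm_sev_dbg uapi introduced earlier in this series. The sev_dbg_encrypt() wrapper and the vm_fd, sev_fd, and guest_hva names are illustrative only, not part of this patch:

	/*
	 * Illustrative only: write plaintext into an SEV guest's encrypted
	 * memory via KVM_SEV_DBG_ENCRYPT.  Assumes the guest was launched
	 * with a policy that permits debugging, vm_fd is the KVM VM fd,
	 * sev_fd is an open /dev/sev fd, and guest_hva is the host virtual
	 * address backing the target guest page (all names hypothetical).
	 */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int sev_dbg_encrypt(int vm_fd, int sev_fd, const void *plaintext,
				   uint64_t guest_hva, uint32_t len)
	{
		struct kvm_sev_dbg dbg = {
			.src_uaddr = (uint64_t)(uintptr_t)plaintext, /* plaintext source */
			.dst_uaddr = guest_hva,                      /* encrypted destination */
			.len       = len,
		};
		struct kvm_sev_cmd cmd = {
			.id     = KVM_SEV_DBG_ENCRYPT,
			.data   = (uint64_t)(uintptr_t)&dbg,
			.sev_fd = (uint32_t)sev_fd,
		};

		/* 0 on success; on failure cmd.error carries the SEV firmware status */
		return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	}

Note that the caller does not need to pad or align src_uaddr, dst_uaddr, or len: sev_dbg_crypt() walks the range page by page, and __sev_dbg_encrypt_user() handles buffers that are not 16-byte aligned via the intermediate-page and read-modify-write paths above.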