@@ -2,8 +2,10 @@
 /*
  * KVM guest address space mapping code
  *
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *               David Hildenbrand <david@redhat.com>
+ *               Janosch Frank <frankja@linux.vnet.ibm.com>
  */
 
 #include <linux/kernel.h>
@@ -588,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		return -EFAULT;
 	pmd = pmd_offset(pud, vmaddr);
 	VM_BUG_ON(pmd_none(*pmd));
-	/* large pmds cannot yet be handled */
-	if (pmd_large(*pmd))
+	/* Are we allowed to use huge pages? */
+	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
 		return -EFAULT;
 	/* Link gmap segment table entry location to page table. */
 	rc = radix_tree_preload(GFP_KERNEL);
@@ -1632,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	unsigned long limit;
 	int rc;
 
+	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
 	BUG_ON(gmap_is_shadow(parent));
 	spin_lock(&parent->shadow_lock);
 	sg = gmap_find_shadow(parent, asce, edat_level);