@@ -1513,6 +1513,94 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return ret;
 }
 
+unsigned long shmem_get_unmapped_area(struct file *file,
+				      unsigned long uaddr, unsigned long len,
+				      unsigned long pgoff, unsigned long flags)
+{
+	unsigned long (*get_area)(struct file *,
+		unsigned long, unsigned long, unsigned long, unsigned long);
+	unsigned long addr;
+	unsigned long offset;
+	unsigned long inflated_len;
+	unsigned long inflated_addr;
+	unsigned long inflated_offset;
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	get_area = current->mm->get_unmapped_area;
+	addr = get_area(file, uaddr, len, pgoff, flags);
+
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return addr;
+	if (IS_ERR_VALUE(addr))
+		return addr;
+	if (addr & ~PAGE_MASK)
+		return addr;
+	if (addr > TASK_SIZE - len)
+		return addr;
+
+	if (shmem_huge == SHMEM_HUGE_DENY)
+		return addr;
+	if (len < HPAGE_PMD_SIZE)
+		return addr;
+	if (flags & MAP_FIXED)
+		return addr;
+	/*
+	 * Our priority is to support MAP_SHARED mapped hugely;
+	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
+	 * But if caller specified an address hint, respect that as before.
+	 */
+	if (uaddr)
+		return addr;
+
+	if (shmem_huge != SHMEM_HUGE_FORCE) {
+		struct super_block *sb;
+
+		if (file) {
+			VM_BUG_ON(file->f_op != &shmem_file_operations);
+			sb = file_inode(file)->i_sb;
+		} else {
+			/*
+			 * Called directly from mm/mmap.c, or drivers/char/mem.c
+			 * for "/dev/zero", to create a shared anonymous object.
+			 */
+			if (IS_ERR(shm_mnt))
+				return addr;
+			sb = shm_mnt->mnt_sb;
+		}
+		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
+			return addr;
+	}
+
+	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
+	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
+		return addr;
+	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
+		return addr;
+
+	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
+	if (inflated_len > TASK_SIZE)
+		return addr;
+	if (inflated_len < len)
+		return addr;
+
+	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
+	if (IS_ERR_VALUE(inflated_addr))
+		return addr;
+	if (inflated_addr & ~PAGE_MASK)
+		return addr;
+
+	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
+	inflated_addr += offset - inflated_offset;
+	if (inflated_offset > offset)
+		inflated_addr += HPAGE_PMD_SIZE;
+
+	if (inflated_addr > TASK_SIZE - len)
+		return addr;
+	return inflated_addr;
+}
+
 #ifdef CONFIG_NUMA
 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
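
The tail of the function is the interesting part: rather than asking the architecture for an aligned region directly, it over-allocates by almost one huge page (inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE) and then slides the returned address up until its offset within a huge page matches the file offset. The standalone program below is a minimal userspace model of that rounding step, assuming a 2MB PMD size and 4KB pages (both are per-architecture in the kernel); the helper name align_to_offset is made up for illustration, and this is a sketch of the arithmetic, not kernel code.

/* Standalone model of the rounding step in shmem_get_unmapped_area().
 * Assumes a 2MB PMD size and 4KB pages; the kernel derives both from
 * the architecture. Build with: cc -o align align.c
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PMD_SIZE	(2UL << 20)	/* 2MB, typical on x86-64 */

/* Given the address the default allocator returned for the inflated
 * request, slide it up so its offset within a huge page matches the
 * file offset (mirrors the inflated_offset logic above). */
static unsigned long align_to_offset(unsigned long inflated_addr,
				     unsigned long pgoff)
{
	unsigned long offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE - 1);
	unsigned long inflated_offset = inflated_addr & (HPAGE_PMD_SIZE - 1);

	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;
	return inflated_addr;
}

int main(void)
{
	/* e.g. the allocator returned 0x7f1234567000 for pgoff 0:
	 * the result rounds up to the next 2MB boundary, 0x7f1234600000. */
	printf("%#lx\n", align_to_offset(0x7f1234567000UL, 0));
	return 0;
}

Because the request was inflated by HPAGE_PMD_SIZE - PAGE_SIZE, the slid-up address plus len still fits inside the region the allocator actually found, so no second allocation attempt is needed.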
@@ -3261,6 +3349,7 @@ static const struct address_space_operations shmem_aops = {
 
 static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
+	.get_unmapped_area = shmem_get_unmapped_area,
 #ifdef CONFIG_TMPFS
 	.llseek		= shmem_file_llseek,
 	.read_iter	= shmem_file_read_iter,
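
Wiring .get_unmapped_area into shmem_file_operations is what routes ordinary mmap() calls on tmpfs files through the new logic: the core VM prefers a file's own placement callback over the mm-wide default. A simplified sketch of that dispatch, paraphrasing (not quoting) mm/mmap.c's get_unmapped_area():

/* Sketch of the core dispatch (paraphrased from mm/mmap.c): when a
 * file provides its own get_unmapped_area hook, it overrides the
 * per-mm default placement callback. */
static unsigned long get_area_dispatch(struct file *file, unsigned long addr,
				       unsigned long len, unsigned long pgoff,
				       unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	return get_area(file, addr, len, pgoff, flags);
}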
@@ -3496,6 +3585,15 @@ void shmem_unlock_mapping(struct address_space *mapping)
 {
 }
 
+#ifdef CONFIG_MMU
+unsigned long shmem_get_unmapped_area(struct file *file,
+				      unsigned long addr, unsigned long len,
+				      unsigned long pgoff, unsigned long flags)
+{
+	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+#endif
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
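
The !CONFIG_SHMEM stub above keeps callers working when shmem is built in its minimal form, by falling straight through to the mm default. With the full implementation in place, the effect is visible from userspace: a large MAP_SHARED mapping of a tmpfs file comes back 2MB-aligned when the mount allows huge pages. A small test program, assuming the hypothetical path /dev/shm/hugetest on a tmpfs mount, a 2MB PMD size, and a kernel carrying this patch (on other setups the address is merely page-aligned):

/* Check whether a large tmpfs mapping comes back PMD-aligned.
 * The path and the 2MB size are assumptions for this example. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumed 2MB PMD size */

int main(void)
{
	size_t len = 4 * HPAGE_SIZE;
	int fd = open("/dev/shm/hugetest", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	/* No address hint: the new f_op lets shmem pick an aligned spot. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	printf("addr %p %s 2MB-aligned\n", p,
	       ((unsigned long)p & (HPAGE_SIZE - 1)) ? "not" : "is");
	munmap(p, len);
	close(fd);
	return 0;
}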