
Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - almost all of the rest of MM

 - misc bits

 - KASAN updates

 - procfs

 - lib/ updates

 - checkpatch updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (124 commits)
  checkpatch: remove false unbalanced braces warning
  checkpatch: notice unbalanced else braces in a patch
  checkpatch: add another old address for the FSF
  checkpatch: update $logFunctions
  checkpatch: warn on logging continuations
  checkpatch: warn on embedded function names
  lib/lz4: remove back-compat wrappers
  fs/pstore: fs/squashfs: change usage of LZ4 to work with new LZ4 version
  crypto: change LZ4 modules to work with new LZ4 module version
  lib/decompress_unlz4: change module to work with new LZ4 module version
  lib: update LZ4 compressor module
  lib/test_sort.c: make it explicitly non-modular
  lib: add CONFIG_TEST_SORT to enable self-test of sort()
  rbtree: use designated initializers
  linux/kernel.h: fix DIV_ROUND_CLOSEST to support negative divisors
  lib/find_bit.c: micro-optimise find_next_*_bit
  lib: add module support to atomic64 tests
  lib: add module support to glob tests
  lib: add module support to crc32 tests
  kernel/ksysfs.c: add __ro_after_init to bin_attribute structure
  ...
Linus Torvalds, 8 years ago
parent
commit
7b46588f36
100 changed files with 892 additions and 384 deletions
  1. 3 3
      Documentation/blockdev/zram.txt
  2. 2 2
      Documentation/sysctl/vm.txt
  3. 18 0
      Documentation/vm/ksm.txt
  4. 89 0
      Documentation/vm/userfaultfd.txt
  5. 3 0
      arch/Kconfig
  6. 1 0
      arch/alpha/include/asm/Kbuild
  7. 0 9
      arch/alpha/include/asm/current.h
  8. 9 7
      arch/arm/mm/dma-mapping.c
  9. 2 2
      arch/arm64/mm/dma-mapping.c
  10. 1 0
      arch/cris/include/asm/Kbuild
  11. 0 15
      arch/cris/include/asm/current.h
  12. 8 3
      arch/frv/mb93090-mb00/pci-frv.c
  13. 3 1
      arch/m68k/68000/bootlogo-vz.h
  14. 3 1
      arch/m68k/68000/bootlogo.h
  15. 2 1
      arch/m68k/include/asm/MC68328.h
  16. 2 1
      arch/m68k/include/asm/MC68EZ328.h
  17. 1 1
      arch/m68k/include/asm/MC68VZ328.h
  18. 2 1
      arch/m68k/include/asm/natfeat.h
  19. 4 4
      arch/m68k/lib/ashldi3.c
  20. 4 4
      arch/m68k/lib/ashrdi3.c
  21. 4 4
      arch/m68k/lib/lshrdi3.c
  22. 4 4
      arch/m68k/lib/muldi3.c
  23. 1 0
      arch/microblaze/pci/pci-common.c
  24. 1 1
      arch/mips/kernel/vdso.c
  25. 2 2
      arch/mips/mm/dma-default.c
  26. 45 7
      arch/powerpc/include/asm/book3s/64/pgtable.h
  27. 1 0
      arch/powerpc/kernel/pci-common.c
  28. 2 2
      arch/powerpc/kvm/book3s_64_vio.c
  29. 2 1
      arch/powerpc/kvm/book3s_hv_builtin.c
  30. 19 20
      arch/powerpc/platforms/cell/spufs/file.c
  31. 1 0
      arch/s390/kernel/crash_dump.c
  32. 1 1
      arch/tile/mm/elf.c
  33. 1 0
      arch/x86/Kconfig
  34. 1 1
      arch/x86/entry/vdso/vma.c
  35. 11 0
      arch/x86/include/asm/paravirt.h
  36. 2 0
      arch/x86/include/asm/paravirt_types.h
  37. 17 0
      arch/x86/include/asm/pgtable-2level.h
  38. 30 0
      arch/x86/include/asm/pgtable-3level.h
  39. 140 0
      arch/x86/include/asm/pgtable.h
  40. 15 0
      arch/x86/include/asm/pgtable_64.h
  41. 1 0
      arch/x86/kernel/paravirt.c
  42. 2 1
      arch/x86/kernel/pci-dma.c
  43. 24 4
      arch/x86/mm/gup.c
  44. 2 2
      arch/x86/mm/mpx.c
  45. 31 0
      arch/x86/mm/pgtable.c
  46. 2 1
      arch/xtensa/kernel/pci-dma.c
  47. 9 14
      crypto/lz4.c
  48. 9 14
      crypto/lz4hc.c
  49. 102 40
      crypto/testmgr.h
  50. 1 1
      drivers/android/binder.c
  51. 5 0
      drivers/base/core.c
  52. 3 2
      drivers/base/dma-contiguous.c
  53. 1 1
      drivers/base/memory.c
  54. 63 64
      drivers/block/zram/zram_drv.c
  55. 6 6
      drivers/block/zram/zram_drv.h
  56. 2 3
      drivers/char/agp/alpha-agp.c
  57. 3 3
      drivers/char/mspec.c
  58. 69 23
      drivers/dax/dax.c
  59. 5 4
      drivers/gpu/drm/armada/armada_gem.c
  60. 9 27
      drivers/gpu/drm/drm_vm.c
  61. 1 1
      drivers/gpu/drm/etnaviv/etnaviv_drv.h
  62. 2 1
      drivers/gpu/drm/etnaviv/etnaviv_gem.c
  63. 2 1
      drivers/gpu/drm/exynos/exynos_drm_gem.c
  64. 1 1
      drivers/gpu/drm/exynos/exynos_drm_gem.h
  65. 2 1
      drivers/gpu/drm/gma500/framebuffer.c
  66. 2 1
      drivers/gpu/drm/gma500/gem.c
  67. 1 1
      drivers/gpu/drm/gma500/psb_drv.h
  68. 1 1
      drivers/gpu/drm/i915/i915_drv.h
  69. 2 2
      drivers/gpu/drm/i915/i915_gem.c
  70. 1 1
      drivers/gpu/drm/msm/msm_drv.h
  71. 2 1
      drivers/gpu/drm/msm/msm_gem.c
  72. 1 1
      drivers/gpu/drm/omapdrm/omap_drv.h
  73. 2 2
      drivers/gpu/drm/omapdrm/omap_gem.c
  74. 3 3
      drivers/gpu/drm/qxl/qxl_ttm.c
  75. 3 3
      drivers/gpu/drm/radeon/radeon_ttm.c
  76. 2 1
      drivers/gpu/drm/tegra/gem.c
  77. 5 5
      drivers/gpu/drm/ttm/ttm_bo_vm.c
  78. 1 1
      drivers/gpu/drm/udl/udl_drv.h
  79. 2 1
      drivers/gpu/drm/udl/udl_gem.c
  80. 2 1
      drivers/gpu/drm/vgem/vgem_drv.c
  81. 3 4
      drivers/gpu/drm/virtio/virtgpu_ttm.c
  82. 2 2
      drivers/hsi/clients/cmt_speech.c
  83. 3 3
      drivers/hwtracing/intel_th/msu.c
  84. 2 2
      drivers/infiniband/hw/hfi1/file_ops.c
  85. 1 1
      drivers/infiniband/hw/qib/qib_file_ops.c
  86. 1 1
      drivers/iommu/amd_iommu.c
  87. 1 1
      drivers/iommu/intel-iommu.c
  88. 2 1
      drivers/media/v4l2-core/videobuf-dma-sg.c
  89. 2 1
      drivers/misc/cxl/context.c
  90. 2 1
      drivers/misc/sgi-gru/grumain.c
  91. 1 1
      drivers/misc/sgi-gru/grutables.h
  92. 3 3
      drivers/scsi/cxlflash/superpipe.c
  93. 2 1
      drivers/scsi/sg.c
  94. 3 3
      drivers/staging/android/ion/ion.c
  95. 4 3
      drivers/staging/lustre/lustre/llite/llite_mmap.c
  96. 1 1
      drivers/staging/lustre/lustre/llite/vvp_io.c
  97. 3 3
      drivers/target/target_core_user.c
  98. 3 3
      drivers/uio/uio.c
  99. 2 2
      drivers/usb/mon/mon_bin.c
  100. 7 9
      drivers/video/fbdev/core/fb_defio.c

+ 3 - 3
Documentation/blockdev/zram.txt

@@ -201,8 +201,8 @@ File /sys/block/zram<id>/mm_stat
 The stat file represents device's mm statistics. It consists of a single
 line of text and contains the following stats separated by whitespace:
  orig_data_size   uncompressed size of data stored in this disk.
-                  This excludes zero-filled pages (zero_pages) since no
-                  memory is allocated for them.
+		  This excludes same-element-filled pages (same_pages) since
+		  no memory is allocated for them.
                   Unit: bytes
  compr_data_size  compressed size of data stored in this disk
  mem_used_total   the amount of memory allocated for this disk. This
@@ -214,7 +214,7 @@ line of text and contains the following stats separated by whitespace:
                   the compressed data
  mem_used_max     the maximum amount of memory zram have consumed to
                   store the data
- zero_pages       the number of zero filled pages written to this disk.
+ same_pages       the number of same element filled pages written to this disk.
                   No memory is allocated for such pages.
  pages_compacted  the number of pages freed during compaction

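As a quick illustration of the rename (not part of the patch): a userspace sketch that parses mm_stat, assuming the field order given in the documentation above and using zram0 only as an example device.

#include <stdio.h>

int main(void)
{
	unsigned long long orig, compr, used, limit, used_max, same, compacted;
	FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* orig_data_size compr_data_size mem_used_total mem_limit
	 * mem_used_max same_pages pages_compacted (same_pages was zero_pages) */
	if (fscanf(f, "%llu %llu %llu %llu %llu %llu %llu",
		   &orig, &compr, &used, &limit, &used_max,
		   &same, &compacted) != 7) {
		fprintf(stderr, "unexpected mm_stat format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("orig=%llu compr=%llu same_pages=%llu\n", orig, compr, same);
	return 0;
}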
+ 2 - 2
Documentation/sysctl/vm.txt

@@ -376,8 +376,8 @@ max_map_count:
 
 This file contains the maximum number of memory map areas a process
 may have. Memory map areas are used as a side-effect of calling
-malloc, directly by mmap and mprotect, and also when loading shared
-libraries.
+malloc, directly by mmap, mprotect, and madvise, and also when loading
+shared libraries.
 
 While most applications need less than a thousand maps, certain
 programs, particularly malloc debuggers, may consume lots of them,

+ 18 - 0
Documentation/vm/ksm.txt

@@ -38,6 +38,10 @@ the range for whenever the KSM daemon is started; even if the range
 cannot contain any pages which KSM could actually merge; even if
 MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE.
 
+If a region of memory must be split into at least one new MADV_MERGEABLE
+or MADV_UNMERGEABLE region, the madvise may return ENOMEM if the process
+will exceed vm.max_map_count (see Documentation/sysctl/vm.txt).
+
 Like other madvise calls, they are intended for use on mapped areas of
 the user address space: they will report ENOMEM if the specified range
 includes unmapped gaps (though working on the intervening mapped areas),
@@ -80,6 +84,20 @@ run              - set 0 to stop ksmd from running but keep merged pages,
                    Default: 0 (must be changed to 1 to activate KSM,
                                except if CONFIG_SYSFS is disabled)
 
+use_zero_pages   - specifies whether empty pages (i.e. allocated pages
+                   that only contain zeroes) should be treated specially.
+                   When set to 1, empty pages are merged with the kernel
+                   zero page(s) instead of with each other as it would
+                   happen normally. This can improve the performance on
+                   architectures with coloured zero pages, depending on
+                   the workload. Care should be taken when enabling this
+                   setting, as it can potentially degrade the performance
+                   of KSM for some workloads, for example if the checksums
+                   of pages candidate for merging match the checksum of
+                   an empty page. This setting can be changed at any time,
+                   it is only effective for pages merged after the change.
+                   Default: 0 (normal KSM behaviour as in earlier releases)
+
 The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
 
 pages_shared     - how many shared pages are being used

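For context, a hedged userspace sketch (not part of the patch) of the interfaces the ksm.txt hunk above documents: madvise(MADV_MERGEABLE), the possible ENOMEM when vm.max_map_count would be exceeded, and the new use_zero_pages knob. The program layout is illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0, len);	/* zero-filled pages: candidates for the zero page */

	/* May fail with ENOMEM if splitting the VMA would push the process
	 * over vm.max_map_count, as the ksm.txt update above notes. */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Merge empty pages with the kernel zero page(s) rather than with
	 * each other (the new use_zero_pages knob, default 0). */
	int fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
	}
	return 0;
}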
+ 89 - 0
Documentation/vm/userfaultfd.txt

@@ -54,6 +54,26 @@ uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of
 respectively all the available features of the read(2) protocol and
 the generic ioctl available.
 
+The uffdio_api.features bitmask returned by the UFFDIO_API ioctl
+defines what memory types are supported by the userfaultfd and what
+events, except page fault notifications, may be generated.
+
+If the kernel supports registering userfaultfd ranges on hugetlbfs
+virtual memory areas, UFFD_FEATURE_MISSING_HUGETLBFS will be set in
+uffdio_api.features. Similarly, UFFD_FEATURE_MISSING_SHMEM will be
+set if the kernel supports registering userfaultfd ranges on shared
+memory (covering all shmem APIs, i.e. tmpfs, IPCSHM, /dev/zero
+MAP_SHARED, memfd_create, etc).
+
+The userland application that wants to use userfaultfd with hugetlbfs
+or shared memory need to set the corresponding flag in
+uffdio_api.features to enable those features.
+
+If the userland desires to receive notifications for events other than
+page faults, it has to verify that uffdio_api.features has appropriate
+UFFD_FEATURE_EVENT_* bits set. These events are described in more
+detail below in "Non-cooperative userfaultfd" section.
+
 Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should
 be invoked (if present in the returned uffdio_api.ioctls bitmask) to
 register a memory range in the userfaultfd by setting the
@@ -142,3 +162,72 @@ course the bitmap is updated accordingly. It's also useful to avoid
 sending the same page twice (in case the userfault is read by the
 postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration
 thread).
+
+== Non-cooperative userfaultfd ==
+
+When the userfaultfd is monitored by an external manager, the manager
+must be able to track changes in the process virtual memory
+layout. Userfaultfd can notify the manager about such changes using
+the same read(2) protocol as for the page fault notifications. The
+manager has to explicitly enable these events by setting appropriate
+bits in uffdio_api.features passed to UFFDIO_API ioctl:
+
+UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
+non-cooperative process. When the monitored process exits, the uffd
+manager will get UFFD_EVENT_EXIT.
+
+UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
+this feature is enabled, the userfaultfd context of the parent process
+is duplicated into the newly created process. The manager receives
+UFFD_EVENT_FORK with file descriptor of the new userfaultfd context in
+the uffd_msg.fork.
+
+UFFD_FEATURE_EVENT_REMAP - enable notifications about mremap()
+calls. When the non-cooperative process moves a virtual memory area to
+a different location, the manager will receive UFFD_EVENT_REMAP. The
+uffd_msg.remap will contain the old and new addresses of the area and
+its original length.
+
+UFFD_FEATURE_EVENT_REMOVE - enable notifications about
+madvise(MADV_REMOVE) and madvise(MADV_DONTNEED) calls. The event
+UFFD_EVENT_REMOVE will be generated upon these calls to madvise. The
+uffd_msg.remove will contain start and end addresses of the removed
+area.
+
+UFFD_FEATURE_EVENT_UNMAP - enable notifications about memory
+unmapping. The manager will get UFFD_EVENT_UNMAP with uffd_msg.remove
+containing start and end addresses of the unmapped area.
+
+Although the UFFD_FEATURE_EVENT_REMOVE and UFFD_FEATURE_EVENT_UNMAP
+are pretty similar, they quite differ in the action expected from the
+userfaultfd manager. In the former case, the virtual memory is
+removed, but the area is not, the area remains monitored by the
+userfaultfd, and if a page fault occurs in that area it will be
+delivered to the manager. The proper resolution for such page fault is
+to zeromap the faulting address. However, in the latter case, when an
+area is unmapped, either explicitly (with munmap() system call), or
+implicitly (e.g. during mremap()), the area is removed and in turn the
+userfaultfd context for such area disappears too and the manager will
+not get further userland page faults from the removed area. Still, the
+notification is required in order to prevent manager from using
+UFFDIO_COPY on the unmapped area.
+
+Unlike userland page faults which have to be synchronous and require
+explicit or implicit wakeup, all the events are delivered
+asynchronously and the non-cooperative process resumes execution as
+soon as manager executes read(). The userfaultfd manager should
+carefully synchronize calls to UFFDIO_COPY with the events
+processing. To aid the synchronization, the UFFDIO_COPY ioctl will
+return -ENOSPC when the monitored process exits at the time of
+UFFDIO_COPY, and -ENOENT, when the non-cooperative process has changed
+its virtual memory layout simultaneously with outstanding UFFDIO_COPY
+operation.
+
+The current asynchronous model of the event delivery is optimal for
+single threaded non-cooperative userfaultfd manager implementations. A
+synchronous event delivery model can be added later as a new
+userfaultfd feature to facilitate multithreading enhancements of the
+non cooperative manager, for example to allow UFFDIO_COPY ioctls to
+run in parallel to the event reception. Single threaded
+implementations should continue to use the current async event
+delivery model instead.

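For context, a minimal illustrative sketch (not part of the patch) of the UFFDIO_API handshake described above: it requests the non-cooperative event features and checks the hugetlbfs/shmem bits the kernel reports back. Error handling is reduced to the essentials.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}

	struct uffdio_api api;
	memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	/* Ask for fork/remap/remove/unmap notifications in addition to faults. */
	api.features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
		       UFFD_FEATURE_EVENT_REMOVE | UFFD_FEATURE_EVENT_UNMAP;

	if (ioctl(uffd, UFFDIO_API, &api)) {
		perror("UFFDIO_API");
		return 1;
	}

	/* The kernel reports the supported feature set back in api.features. */
	if (api.features & UFFD_FEATURE_MISSING_HUGETLBFS)
		printf("hugetlbfs missing-page faults supported\n");
	if (api.features & UFFD_FEATURE_MISSING_SHMEM)
		printf("shmem missing-page faults supported\n");

	close(uffd);
	return 0;
}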
+ 3 - 0
arch/Kconfig

@@ -571,6 +571,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	bool
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	bool
+
 config HAVE_ARCH_HUGE_VMAP
 	bool

+ 1 - 0
arch/alpha/include/asm/Kbuild

@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += current.h

+ 0 - 9
arch/alpha/include/asm/current.h

@@ -1,9 +0,0 @@
-#ifndef _ALPHA_CURRENT_H
-#define _ALPHA_CURRENT_H
-
-#include <linux/thread_info.h>
-
-#define get_current()	(current_thread_info()->task)
-#define current		get_current()
-
-#endif /* _ALPHA_CURRENT_H */

+ 9 - 7
arch/arm/mm/dma-mapping.c

@@ -349,7 +349,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller, bool want_vaddr,
-				     int coherent_flag);
+				     int coherent_flag, gfp_t gfp);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
@@ -420,7 +420,8 @@ static int __init atomic_pool_init(void)
 	 */
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-				      &page, atomic_pool_init, true, NORMAL);
+				      &page, atomic_pool_init, true, NORMAL,
+				      GFP_KERNEL);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
 					   &page, atomic_pool_init, true);
@@ -594,14 +595,14 @@ static int __free_from_pool(void *start, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller, bool want_vaddr,
-				     int coherent_flag)
+				     int coherent_flag, gfp_t gfp)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
 	void *ptr = NULL;
 
-	page = dma_alloc_from_contiguous(dev, count, order);
+	page = dma_alloc_from_contiguous(dev, count, order, gfp);
 	if (!page)
 		return NULL;
 
@@ -655,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp)	NULL
 #define __free_from_pool(cpu_addr, size)			do { } while (0)
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -697,7 +698,8 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
 {
 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
 				       ret_page, args->caller,
-				       args->want_vaddr, args->coherent_flag);
+				       args->want_vaddr, args->coherent_flag,
+				       args->gfp);
 }
 
 static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -1312,7 +1314,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 		unsigned long order = get_order(size);
 		struct page *page;
 
-		page = dma_alloc_from_contiguous(dev, count, order);
+		page = dma_alloc_from_contiguous(dev, count, order, gfp);
 		if (!page)
 			goto error;

+ 2 - 2
arch/arm64/mm/dma-mapping.c

@@ -107,7 +107,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		void *addr;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-							get_order(size));
+						 get_order(size), flags);
 		if (!page)
 			return NULL;
 
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
 
 	if (dev_get_cma_area(NULL))
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
-							pool_size_order);
+						 pool_size_order, GFP_KERNEL);
 	else
 		page = alloc_pages(GFP_DMA, pool_size_order);

+ 1 - 0
arch/cris/include/asm/Kbuild

@@ -4,6 +4,7 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cmpxchg.h
+generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += errno.h

+ 0 - 15
arch/cris/include/asm/current.h

@@ -1,15 +0,0 @@
-#ifndef _CRIS_CURRENT_H
-#define _CRIS_CURRENT_H
-
-#include <linux/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct * get_current(void)
-{
-        return current_thread_info()->task;
-}
- 
-#define current get_current()
-
-#endif /* !(_CRIS_CURRENT_H) */

+ 8 - 3
arch/frv/mb93090-mb00/pci-frv.c

@@ -147,7 +147,7 @@ static void __init pcibios_allocate_resources(int pass)
 static void __init pcibios_assign_resources(void)
 {
 	struct pci_dev *dev = NULL;
-	int idx;
+	int idx, err;
 	struct resource *r;
 
 	for_each_pci_dev(dev) {
@@ -172,8 +172,13 @@ static void __init pcibios_assign_resources(void)
 			 *  the BIOS forgot to do so or because we have decided the old
 			 *  address was unusable for some reason.
 			 */
-			if (!r->start && r->end)
-				pci_assign_resource(dev, idx);
+			if (!r->start && r->end) {
+				err = pci_assign_resource(dev, idx);
+				if (err)
+					dev_err(&dev->dev,
+						"Failed to assign new address to %d\n",
+						idx);
+			}
 		}
 	}
 }

+ 3 - 1
arch/m68k/68000/bootlogo-vz.h

@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
 #define splash_width 640
 #define splash_height 480
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

+ 3 - 1
arch/m68k/68000/bootlogo.h

@@ -1,6 +1,8 @@
+#include <linux/compiler.h>
+
 #define bootlogo_width 160
 #define bootlogo_height 160
-unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
+unsigned char __aligned(16) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

+ 2 - 1
arch/m68k/include/asm/MC68328.h

@@ -8,6 +8,7 @@
  * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
  *
  */
+#include <linux/compiler.h>
 
 #ifndef _MC68328_H_
 #define _MC68328_H_
@@ -993,7 +994,7 @@ typedef volatile struct {
   volatile unsigned short int pad1;
   volatile unsigned short int pad2;
   volatile unsigned short int pad3;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
 
 
 /**********

+ 2 - 1
arch/m68k/include/asm/MC68EZ328.h

@@ -9,6 +9,7 @@
  *                     The Silver Hammer Group, Ltd.
  *
  */
+#include <linux/compiler.h>
 
 #ifndef _MC68EZ328_H_
 #define _MC68EZ328_H_
@@ -815,7 +816,7 @@ typedef volatile struct {
   volatile unsigned short int nipr;
   volatile unsigned short int pad1;
   volatile unsigned short int pad2;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;
 
 
 /**********

+ 1 - 1
arch/m68k/include/asm/MC68VZ328.h

@@ -909,7 +909,7 @@ typedef struct {
   volatile unsigned short int nipr;
   volatile unsigned short int hmark;
   volatile unsigned short int unused;
-} __attribute__((packed)) m68328_uart;
+} __packed m68328_uart;

+ 2 - 1
arch/m68k/include/asm/natfeat.h

@@ -6,6 +6,7 @@
  * This software may be used and distributed according to the terms of
  * the GNU General Public License (GPL), incorporated herein by reference.
  */
+#include <linux/compiler.h>
 
 #ifndef _NATFEAT_H
 #define _NATFEAT_H
@@ -17,6 +18,6 @@ void nf_init(void);
 void nf_shutdown(void);
 
 void nfprint(const char *fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
+	__printf(1, 2);
 
 # endif /* _NATFEAT_H */

+ 4 - 4
arch/m68k/lib/ashldi3.c

@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef		 int SItype	__attribute__ ((mode (SI)));
-typedef unsigned int USItype	__attribute__ ((mode (SI)));
-typedef		 int DItype	__attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef		 int SItype	__mode(SI);
+typedef unsigned int USItype	__mode(SI);
+typedef		 int DItype	__mode(DI);
+typedef int word_type           __mode(__word__);
 
 struct DIstruct {SItype high, low;};

+ 4 - 4
arch/m68k/lib/ashrdi3.c

@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef		 int SItype	__attribute__ ((mode (SI)));
-typedef unsigned int USItype	__attribute__ ((mode (SI)));
-typedef		 int DItype	__attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef		 int SItype	__mode(SI);
+typedef unsigned int USItype	__mode(SI);
+typedef		 int DItype	__mode(DI);
+typedef int word_type           __mode(__word__);
 
 struct DIstruct {SItype high, low;};

+ 4 - 4
arch/m68k/lib/lshrdi3.c

@@ -18,10 +18,10 @@ GNU General Public License for more details. */
 
 #define BITS_PER_UNIT 8
 
-typedef		 int SItype	__attribute__ ((mode (SI)));
-typedef unsigned int USItype	__attribute__ ((mode (SI)));
-typedef		 int DItype	__attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef		 int SItype	__mode(SI);
+typedef unsigned int USItype	__mode(SI);
+typedef		 int DItype	__mode(DI);
+typedef int word_type           __mode(__word__);
 
 struct DIstruct {SItype high, low;};

+ 4 - 4
arch/m68k/lib/muldi3.c

@@ -65,10 +65,10 @@ GNU General Public License for more details. */
     umul_ppmm (__w.s.high, __w.s.low, u, v);				\
     __w.ll; })
 
-typedef 	 int SItype	__attribute__ ((mode (SI)));
-typedef unsigned int USItype	__attribute__ ((mode (SI)));
-typedef		 int DItype	__attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
+typedef 	 int SItype	__mode(SI);
+typedef unsigned int USItype	__mode(SI);
+typedef		 int DItype	__mode(DI);
+typedef int word_type           __mode(__word__);
 
 struct DIstruct {SItype high, low;};

+ 1 - 0
arch/microblaze/pci/pci-common.c

@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mm.h>
+#include <linux/shmem_fs.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
 #include <linux/irq.h>

+ 1 - 1
arch/mips/kernel/vdso.c

@@ -111,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
 			   VM_READ|VM_WRITE|VM_EXEC|
 			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-			   0);
+			   0, NULL);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
 		goto out;

+ 2 - 2
arch/mips/mm/dma-default.c

@@ -148,8 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	gfp = massage_gfp_flags(dev, gfp);
 
 	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
-		page = dma_alloc_from_contiguous(dev,
-					count, get_order(size));
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 gfp);
 	if (!page)
 		page = alloc_pages(gfp, get_order(size));

+ 45 - 7
arch/powerpc/include/asm/book3s/64/pgtable.h

@@ -1,6 +1,9 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
 
+#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
+#endif
 /*
  * Common bits between hash and Radix page table
  */
@@ -434,15 +437,47 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 #ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
 static inline int pte_protnone(pte_t pte)
 {
-	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
-		cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED);
+	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
+		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
+#define pte_mk_savedwrite pte_mk_savedwrite
+static inline pte_t pte_mk_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by Autonuma subsystem to preserve the write bit
+	 * while marking the pte PROT_NONE. Only allow this
+	 * on PROT_NONE pte
+	 */
+	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
+		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
+	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+}
+
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
+{
+	/*
+	 * Used by KSM subsystem to make a protnone pte readonly.
+	 */
+	VM_BUG_ON(!pte_protnone(pte));
+	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+}
+
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+	/*
+	 * Saved write ptes are prot none ptes that doesn't have
+	 * privileged bit sit. We mark prot none as one which has
+	 * present and pviliged bit set and RWX cleared. To mark
+	 * protnone which used to have _PAGE_WRITE set we clear
+	 * the privileged bit.
+	 */
+	VM_BUG_ON(!pte_protnone(pte));
+	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -873,6 +908,8 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
+#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 #define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
@@ -889,6 +926,7 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);

+ 1 - 0
arch/powerpc/kernel/pci-common.c

@@ -25,6 +25,7 @@
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/mm.h>
+#include <linux/shmem_fs.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
 #include <linux/irq.h>

+ 2 - 2
arch/powerpc/kvm/book3s_64_vio.c

@@ -102,9 +102,9 @@ static void release_spapr_tce_table(struct rcu_head *head)
 	kfree(stt);
 }
 
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int kvm_spapr_tce_fault(struct vm_fault *vmf)
 {
-	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
 	struct page *page;
 
 	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))

+ 2 - 1
arch/powerpc/kvm/book3s_hv_builtin.c

@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+			 GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

+ 19 - 20
arch/powerpc/platforms/cell/spufs/file.c

@@ -233,8 +233,9 @@ spufs_mem_write(struct file *file, const char __user *buffer,
 }
 
 static int
-spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mem_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct spu_context *ctx	= vma->vm_file->private_data;
 	unsigned long pfn, offset;
@@ -311,12 +312,11 @@ static const struct file_operations spufs_mem_fops = {
 	.mmap			= spufs_mem_mmap,
 };
 
-static int spufs_ps_fault(struct vm_area_struct *vma,
-				    struct vm_fault *vmf,
+static int spufs_ps_fault(struct vm_fault *vmf,
 				    unsigned long ps_offs,
 				    unsigned long ps_size)
 {
-	struct spu_context *ctx = vma->vm_file->private_data;
+	struct spu_context *ctx = vmf->vma->vm_file->private_data;
 	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
 	int ret = 0;
@@ -354,7 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
@@ -367,10 +367,9 @@ refault:
 }
 
 #if SPUFS_MMAP_4K
-static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
-					   struct vm_fault *vmf)
+static int spufs_cntl_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -1067,15 +1066,15 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 }
 
 static int
-spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal1_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1205,15 +1204,15 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 
 #if SPUFS_MMAP_4K
 static int
-spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_signal2_mmap_fault(struct vm_fault *vmf)
 {
 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
-	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
 	 * signal 1 and 2 area
 	 */
-	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
 #else
 #error unsupported page size
 #endif
@@ -1334,9 +1333,9 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 
 #if SPUFS_MMAP_4K
 static int
-spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mss_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -1396,9 +1395,9 @@ static const struct file_operations spufs_mss_fops = {
 };
 
 static int
-spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_psmap_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
@@ -1456,9 +1455,9 @@ static const struct file_operations spufs_psmap_fops = {
 
 #if SPUFS_MMAP_4K
 static int
-spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+spufs_mfc_mmap_fault(struct vm_fault *vmf)
 {
-	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
+	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
 }
 
 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {

+ 1 - 0
arch/s390/kernel/crash_dump.c

@@ -32,6 +32,7 @@ static struct memblock_type oldmem_type = {
 	.max = 1,
 	.total_size = 0,
 	.regions = &oldmem_region,
+	.name = "oldmem",
 };
 
 struct save_area {

+ 1 - 1
arch/tile/mm/elf.c

@@ -143,7 +143,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		unsigned long addr = MEM_USER_INTRPT;
 		addr = mmap_region(NULL, addr, INTRPT_SIZE,
 				   VM_READ|VM_EXEC|
-				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
+				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0, NULL);
 		if (addr > (unsigned long) -PAGE_SIZE)
 			retval = (int) addr;
 	}

+ 1 - 0
arch/x86/Kconfig

@@ -109,6 +109,7 @@ config X86
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
 	select HAVE_ARCH_VMAP_STACK		if X86_64
 	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_CC_STACKPROTECTOR

+ 1 - 1
arch/x86/entry/vdso/vma.c

@@ -186,7 +186,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		do_munmap(mm, text_start, image->size);
+		do_munmap(mm, text_start, image->size, NULL);
 	} else {
 		current->mm->context.vdso = (void __user *)text_start;
 		current->mm->context.vdso_image = image;

+ 11 - 0
arch/x86/include/asm/paravirt.h

@@ -475,6 +475,17 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			    native_pmd_val(pmd));
 }
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+			      pud_t *pudp, pud_t pud)
+{
+	if (sizeof(pudval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
+			    native_pud_val(pud));
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	pmdval_t val = native_pmd_val(pmd);

+ 2 - 0
arch/x86/include/asm/paravirt_types.h

@@ -249,6 +249,8 @@ struct pv_mmu_ops {
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
 	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
 			   pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pud_at)(struct mm_struct *mm, unsigned long addr,
+			   pud_t *pudp, pud_t pudval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);

+ 17 - 0
arch/x86/include/asm/pgtable-2level.h

@@ -21,6 +21,10 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 	*pmdp = pmd;
 }
 
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+}
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	native_set_pte(ptep, pte);
@@ -31,6 +35,10 @@ static inline void native_pmd_clear(pmd_t *pmdp)
 	native_set_pmd(pmdp, __pmd(0));
 }
 
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+
 static inline void native_pte_clear(struct mm_struct *mm,
 				    unsigned long addr, pte_t *xp)
 {
@@ -55,6 +63,15 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+	return __pud(xchg((pudval_t *)xp, 0));
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
 /* Bit manipulation helper on pte/pgoff entry */
 static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
 				      unsigned long mask, unsigned int leftshift)

+ 30 - 0
arch/x86/include/asm/pgtable-3level.h

@@ -121,6 +121,12 @@ static inline void native_pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
+#ifndef CONFIG_SMP
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+#endif
+
 static inline void pud_clear(pud_t *pudp)
 {
 	set_pud(pudp, __pud(0));
@@ -176,6 +182,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
+#ifdef CONFIG_SMP
+union split_pud {
+	struct {
+		u32 pud_low;
+		u32 pud_high;
+	};
+	pud_t pud;
+};
+
+static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
+{
+	union split_pud res, *orig = (union split_pud *)pudp;
+
+	/* xchg acts as a barrier before setting of the high bits */
+	res.pud_low = xchg(&orig->pud_low, 0);
+	res.pud_high = orig->pud_high;
+	orig->pud_high = 0;
+
+	return res.pud;
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
 /* Encode and de-code a swap entry */
 /* Encode and de-code a swap entry */
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define __swp_type(x)			(((x).val) & 0x1f)
 #define __swp_type(x)			(((x).val) & 0x1f)

+ 140 - 0
arch/x86/include/asm/pgtable.h

@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
 #define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
+#define set_pud_at(mm, addr, pudp, pud)	native_set_pud_at(mm, addr, pudp, pud)
 
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -128,6 +129,16 @@ static inline int pmd_young(pmd_t pmd)
 	return pmd_flags(pmd) & _PAGE_ACCESSED;
 }
+static inline int pud_dirty(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_DIRTY;
+}
+
+static inline int pud_young(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -181,6 +192,13 @@ static inline int pmd_trans_huge(pmd_t pmd)
 	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 }
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_trans_huge(pud_t pud)
+{
+	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+}
+#endif
+
 #define has_transparent_hugepage has_transparent_hugepage
 static inline int has_transparent_hugepage(void)
 {
@@ -192,6 +210,18 @@ static inline int pmd_devmap(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
 }
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_devmap(pud_t pud)
+{
+	return !!(pud_val(pud) & _PAGE_DEVMAP);
+}
+#else
+static inline int pud_devmap(pud_t pud)
+{
+	return 0;
+}
+#endif
 #endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -333,6 +363,65 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
 }
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkold(pud_t pud)
+{
+	return pud_clear_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkclean(pud_t pud)
+{
+	return pud_clear_flags(pud, _PAGE_DIRTY);
+}
+
+static inline pud_t pud_wrprotect(pud_t pud)
+{
+	return pud_clear_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mkdirty(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pud_t pud_mkdevmap(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_DEVMAP);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline pud_t pud_mkyoung(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkwrite(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mknotpresent(pud_t pud)
+{
+	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline int pte_soft_dirty(pte_t pte)
 {
@@ -344,6 +433,11 @@ static inline int pmd_soft_dirty(pmd_t pmd)
 	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
 }
+static inline int pud_soft_dirty(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
+}
+
 static inline pte_t pte_mksoft_dirty(pte_t pte)
 {
 	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
@@ -354,6 +448,11 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
+static inline pud_t pud_mksoft_dirty(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
@@ -364,6 +463,11 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 }
+static inline pud_t pud_clear_soft_dirty(pud_t pud)
+{
+	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 /*
@@ -392,6 +496,12 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 		     massage_pgprot(pgprot));
 }
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pteval_t val = pte_val(pte);
@@ -771,6 +881,14 @@ static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
 	return res;
 }
+static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
+{
+	pud_t res = *pudp;
+
+	native_pud_clear(pudp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
@@ -783,6 +901,12 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	native_set_pmd(pmdp, pmd);
 }
+static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
+				     pud_t *pudp, pud_t pud)
+{
+	native_set_pud(pudp, pud);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
@@ -861,10 +985,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp,
 				 pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pud_t *pudp,
+				 pud_t entry, int dirty);
 
 
 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
 extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 				     unsigned long addr, pmd_t *pmdp);
+extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pud_t *pudp);
 
 
 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
@@ -884,6 +1013,13 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long
 	return native_pmdp_get_and_clear(pmdp);
 }
 
+#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+					unsigned long addr, pud_t *pudp)
+{
+	return native_pudp_get_and_clear(pudp);
+}
+
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pmd_t *pmdp)
@@ -932,6 +1068,10 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmd)
 {
 }
+static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t *pud)
+{
+}
 
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
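
The pud_* helpers added above mirror the existing pmd_* accessors flag for flag. A minimal sketch of how they compose when constructing a huge PUD entry (illustrative only; pfn and prot are hypothetical inputs, not values from this series):

static inline pud_t example_huge_pud(unsigned long pfn, pgprot_t prot)
{
	/* pfn_pud() packs the page frame number with the massaged protection bits */
	pud_t pud = pfn_pud(pfn, prot);

	pud = pud_mkhuge(pud);		/* _PAGE_PSE: this entry maps a huge page */
	pud = pud_mkyoung(pud);		/* _PAGE_ACCESSED */
	pud = pud_mkdirty(pud);		/* _PAGE_DIRTY | _PAGE_SOFT_DIRTY */
	return pud_wrprotect(pud);	/* clear _PAGE_RW until the first write */
}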

+ 15 - 0
arch/x86/include/asm/pgtable_64.h

@@ -106,6 +106,21 @@ static inline void native_pud_clear(pud_t *pud)
 	native_set_pud(pud, native_make_pud(0));
 }
 
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+#ifdef CONFIG_SMP
+	return native_make_pud(xchg(&xp->pud, 0));
+#else
+	/* native_local_pudp_get_and_clear,
+	 * but duplicated because of cyclic dependency
+	 */
+	pud_t ret = *xp;
+
+	native_pud_clear(xp);
+	return ret;
+#endif
+}
+
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
 	*pgdp = pgd;
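
The CONFIG_SMP split above exists because the read and the clear must happen as one atomic operation; otherwise a concurrent hardware accessed/dirty-bit update could land between them and be lost. A userspace analogue of the same pattern (illustration only, not kernel code):

#include <stdatomic.h>

/* Single atomic exchange: no window in which a concurrent update can vanish. */
static unsigned long entry_get_and_clear(_Atomic unsigned long *entry)
{
	return atomic_exchange(entry, 0UL);
}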

+ 1 - 0
arch/x86/kernel/paravirt.c

@@ -425,6 +425,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 	.pmd_clear = native_pmd_clear,
 	.pmd_clear = native_pmd_clear,
 #endif
 #endif
 	.set_pud = native_set_pud,
 	.set_pud = native_set_pud,
+	.set_pud_at = native_set_pud_at,
 
 
 	.pmd_val = PTE_IDENT,
 	.pmd_val = PTE_IDENT,
 	.make_pmd = PTE_IDENT,
 	.make_pmd = PTE_IDENT,

+ 2 - 1
arch/x86/kernel/pci-dma.c

@@ -91,7 +91,8 @@ again:
 	page = NULL;
 	page = NULL;
 	/* CMA can be used only in the context which permits sleeping */
 	/* CMA can be used only in the context which permits sleeping */
 	if (gfpflags_allow_blocking(flag)) {
 	if (gfpflags_allow_blocking(flag)) {
-		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 flag);
 		if (page && page_to_phys(page) + size > dma_mask) {
 		if (page && page_to_phys(page) + size > dma_mask) {
 			dma_release_from_contiguous(dev, page, count);
 			dma_release_from_contiguous(dev, page, count);
 			page = NULL;
 			page = NULL;

+ 24 - 4
arch/x86/mm/gup.c

@@ -154,14 +154,12 @@ static inline void get_head_page_multiple(struct page *page, int nr)
 	SetPageReferenced(page);
 	SetPageReferenced(page);
 }
 }
 
 
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 		unsigned long end, struct page **pages, int *nr)
 		unsigned long end, struct page **pages, int *nr)
 {
 {
 	int nr_start = *nr;
 	int nr_start = *nr;
-	unsigned long pfn = pmd_pfn(pmd);
 	struct dev_pagemap *pgmap = NULL;
 	struct dev_pagemap *pgmap = NULL;
 
 
-	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
 	do {
 	do {
 		struct page *page = pfn_to_page(pfn);
 		struct page *page = pfn_to_page(pfn);
 
 
@@ -180,6 +178,24 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
 	return 1;
 	return 1;
 }
 }
 
 
+static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+		unsigned long end, struct page **pages, int *nr)
+{
+	unsigned long fault_pfn;
+
+	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
+static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+		unsigned long end, struct page **pages, int *nr)
+{
+	unsigned long fault_pfn;
+
+	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 {
@@ -251,9 +267,13 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 
 
 	if (!pte_allows_gup(pud_val(pud), write))
 	if (!pte_allows_gup(pud_val(pud), write))
 		return 0;
 		return 0;
+
+	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+	if (pud_devmap(pud))
+		return __gup_device_huge_pud(pud, addr, end, pages, nr);
+
 	/* hugepages are never "special" */
 	/* hugepages are never "special" */
 	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
 	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
-	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
 
 
 	refs = 0;
 	refs = 0;
 	head = pud_page(pud);
 	head = pud_page(pud);

+ 2 - 2
arch/x86/mm/mpx.c

@@ -51,7 +51,7 @@ static unsigned long mpx_mmap(unsigned long len)
 
 
 	down_write(&mm->mmap_sem);
 	down_write(&mm->mmap_sem);
 	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
 	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
-			MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
+		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
 	up_write(&mm->mmap_sem);
 	up_write(&mm->mmap_sem);
 	if (populate)
 	if (populate)
 		mm_populate(addr, populate);
 		mm_populate(addr, populate);
@@ -893,7 +893,7 @@ static int unmap_entire_bt(struct mm_struct *mm,
 	 * avoid recursion, do_munmap() will check whether it comes
 	 * avoid recursion, do_munmap() will check whether it comes
 	 * from one bounds table through VM_MPX flag.
 	 * from one bounds table through VM_MPX flag.
 	 */
 	 */
-	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
+	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
 }
 }
 
 
 static int try_unmap_single_bt(struct mm_struct *mm,
 static int try_unmap_single_bt(struct mm_struct *mm,

+ 31 - 0
arch/x86/mm/pgtable.c

@@ -445,6 +445,26 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 
 
 	return changed;
 	return changed;
 }
 }
+
+int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+			  pud_t *pudp, pud_t entry, int dirty)
+{
+	int changed = !pud_same(*pudp, entry);
+
+	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
+
+	if (changed && dirty) {
+		*pudp = entry;
+		/*
+		 * We had a write-protection fault here and changed the pud
+		 * to be more permissive. No need to flush the TLB for that,
+		 * #PF is architecturally guaranteed to do that and in the
+		 * worst-case we'll generate a spurious fault.
+		 */
+	}
+
+	return changed;
+}
 #endif
 #endif
 
 
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
@@ -474,6 +494,17 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 
 
 	return ret;
 	return ret;
 }
 }
+int pudp_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long addr, pud_t *pudp)
+{
+	int ret = 0;
+
+	if (pud_young(*pudp))
+		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+					 (unsigned long *)pudp);
+
+	return ret;
+}
 #endif
 #endif
 
 
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 int ptep_clear_flush_young(struct vm_area_struct *vma,
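
A hypothetical caller (not part of this series) showing the usual pattern around the new helper: clear the accessed bit and pay for a TLB flush only when it was actually set, the same way the generic code already treats PMDs. HPAGE_PUD_SIZE comes from the PUD transparent-hugepage patches elsewhere in this merge:

static int example_pud_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pud_t *pudp)
{
	int young = pudp_test_and_clear_young(vma, addr, pudp);

	if (young)
		flush_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);

	return young;
}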

+ 2 - 1
arch/xtensa/kernel/pci-dma.c

@@ -158,7 +158,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 		flag |= GFP_DMA;
 		flag |= GFP_DMA;
 
 
 	if (gfpflags_allow_blocking(flag))
 	if (gfpflags_allow_blocking(flag))
-		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 flag);
 
 
 	if (!page)
 	if (!page)
 		page = alloc_pages(flag, get_order(size));
 		page = alloc_pages(flag, get_order(size));

+ 9 - 14
crypto/lz4.c

@@ -66,15 +66,13 @@ static void lz4_exit(struct crypto_tfm *tfm)
 static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
 static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
 				 u8 *dst, unsigned int *dlen, void *ctx)
 				 u8 *dst, unsigned int *dlen, void *ctx)
 {
 {
-	size_t tmp_len = *dlen;
-	int err;
+	int out_len = LZ4_compress_default(src, dst,
+		slen, *dlen, ctx);
 
 
-	err = lz4_compress(src, slen, dst, &tmp_len, ctx);
-
-	if (err < 0)
+	if (!out_len)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	*dlen = tmp_len;
+	*dlen = out_len;
 	return 0;
 	return 0;
 }
 }
 
 
@@ -96,16 +94,13 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
 static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
 static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
 				   u8 *dst, unsigned int *dlen, void *ctx)
 				   u8 *dst, unsigned int *dlen, void *ctx)
 {
 {
-	int err;
-	size_t tmp_len = *dlen;
-	size_t __slen = slen;
+	int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
 
 
-	err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
-	if (err < 0)
-		return -EINVAL;
+	if (out_len < 0)
+		return out_len;
 
 
-	*dlen = tmp_len;
-	return err;
+	*dlen = out_len;
+	return 0;
 }
 }
 
 
 static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
 static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
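
For reference, a sketch of the updated lib/lz4 calling convention that the rewritten wrappers above rely on (assumed from the new API, not copied from the patch): compression returns the number of bytes written, with 0 meaning the destination was too small, while decompression returns the decompressed length or a negative error code; wrkmem must provide LZ4_MEM_COMPRESS bytes of scratch space.

#include <linux/errno.h>
#include <linux/lz4.h>

static int example_lz4_pack(const char *src, int slen,
			    char *dst, int dcap, void *wrkmem)
{
	int clen = LZ4_compress_default(src, dst, slen, dcap, wrkmem);

	return clen ? clen : -EINVAL;	/* 0 means dst was too small */
}

static int example_lz4_unpack(const char *src, int slen, char *dst, int dcap)
{
	int len = LZ4_decompress_safe(src, dst, slen, dcap);

	return len < 0 ? -EINVAL : len;	/* negative means corrupt input */
}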

+ 9 - 14
crypto/lz4hc.c

@@ -65,15 +65,13 @@ static void lz4hc_exit(struct crypto_tfm *tfm)
 static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
 static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
 				   u8 *dst, unsigned int *dlen, void *ctx)
 				   u8 *dst, unsigned int *dlen, void *ctx)
 {
 {
-	size_t tmp_len = *dlen;
-	int err;
+	int out_len = LZ4_compress_HC(src, dst, slen,
+		*dlen, LZ4HC_DEFAULT_CLEVEL, ctx);
 
 
-	err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
-
-	if (err < 0)
+	if (!out_len)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	*dlen = tmp_len;
+	*dlen = out_len;
 	return 0;
 	return 0;
 }
 }
 
 
@@ -97,16 +95,13 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
 static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
 static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
 				     u8 *dst, unsigned int *dlen, void *ctx)
 				     u8 *dst, unsigned int *dlen, void *ctx)
 {
 {
-	int err;
-	size_t tmp_len = *dlen;
-	size_t __slen = slen;
+	int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
 
 
-	err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
-	if (err < 0)
-		return -EINVAL;
+	if (out_len < 0)
+		return out_len;
 
 
-	*dlen = tmp_len;
-	return err;
+	*dlen = out_len;
+	return 0;
 }
 }
 
 
 static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
 static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,

+ 102 - 40
crypto/testmgr.h

@@ -34293,61 +34293,123 @@ static struct hash_testvec bfin_crc_tv_template[] = {
 
 
 static struct comp_testvec lz4_comp_tv_template[] = {
 static struct comp_testvec lz4_comp_tv_template[] = {
 	{
 	{
-		.inlen	= 70,
-		.outlen	= 45,
-		.input	= "Join us now and share the software "
-			  "Join us now and share the software ",
-		.output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
-			  "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
-			  "\x64\x20\x73\x68\x61\x72\x65\x20"
-			  "\x74\x68\x65\x20\x73\x6f\x66\x74"
-			  "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
-			  "\x77\x61\x72\x65\x20",
+		.inlen	= 255,
+		.outlen	= 218,
+		.input	= "LZ4 is lossless compression algorithm, providing"
+			 " compression speed at 400 MB/s per core, scalable "
+			 "with multi-cores CPU. It features an extremely fast "
+			 "decoder, with speed in multiple GB/s per core, "
+			 "typically reaching RAM speed limits on multi-core "
+			 "systems.",
+		.output	= "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+			  "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+			  "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+			  "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+			  "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+			  "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+			  "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+			  "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+			  "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+			  "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+			  "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+			  "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+			  "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+			  "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+			  "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+			  "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+			  "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
+
 	},
 	},
 };
 };
 
 
 static struct comp_testvec lz4_decomp_tv_template[] = {
 static struct comp_testvec lz4_decomp_tv_template[] = {
 	{
 	{
-		.inlen	= 45,
-		.outlen	= 70,
-		.input  = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
-			  "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
-			  "\x64\x20\x73\x68\x61\x72\x65\x20"
-			  "\x74\x68\x65\x20\x73\x6f\x66\x74"
-			  "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
-			  "\x77\x61\x72\x65\x20",
-		.output	= "Join us now and share the software "
-			  "Join us now and share the software ",
+		.inlen	= 218,
+		.outlen	= 255,
+		.input	= "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+			  "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+			  "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+			  "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+			  "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+			  "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+			  "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+			  "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+			  "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+			  "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+			  "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+			  "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+			  "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+			  "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+			  "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+			  "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+			  "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
+		.output	= "LZ4 is lossless compression algorithm, providing"
+			 " compression speed at 400 MB/s per core, scalable "
+			 "with multi-cores CPU. It features an extremely fast "
+			 "decoder, with speed in multiple GB/s per core, "
+			 "typically reaching RAM speed limits on multi-core "
+			 "systems.",
 	},
 	},
 };
 };
 
 
 static struct comp_testvec lz4hc_comp_tv_template[] = {
 static struct comp_testvec lz4hc_comp_tv_template[] = {
 	{
 	{
-		.inlen	= 70,
-		.outlen	= 45,
-		.input	= "Join us now and share the software "
-			  "Join us now and share the software ",
-		.output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
-			  "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
-			  "\x64\x20\x73\x68\x61\x72\x65\x20"
-			  "\x74\x68\x65\x20\x73\x6f\x66\x74"
-			  "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
-			  "\x77\x61\x72\x65\x20",
+		.inlen	= 255,
+		.outlen	= 216,
+		.input	= "LZ4 is lossless compression algorithm, providing"
+			 " compression speed at 400 MB/s per core, scalable "
+			 "with multi-cores CPU. It features an extremely fast "
+			 "decoder, with speed in multiple GB/s per core, "
+			 "typically reaching RAM speed limits on multi-core "
+			 "systems.",
+		.output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+			  "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+			  "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+			  "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+			  "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+			  "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+			  "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+			  "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+			  "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+			  "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+			  "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+			  "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+			  "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+			  "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+			  "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+			  "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+			  "\x73\x79\x73\x74\x65\x6d\x73\x2e",
+
 	},
 	},
 };
 };
 
 
 static struct comp_testvec lz4hc_decomp_tv_template[] = {
 static struct comp_testvec lz4hc_decomp_tv_template[] = {
 	{
 	{
-		.inlen	= 45,
-		.outlen	= 70,
-		.input  = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
-			  "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
-			  "\x64\x20\x73\x68\x61\x72\x65\x20"
-			  "\x74\x68\x65\x20\x73\x6f\x66\x74"
-			  "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
-			  "\x77\x61\x72\x65\x20",
-		.output	= "Join us now and share the software "
-			  "Join us now and share the software ",
+		.inlen	= 216,
+		.outlen	= 255,
+		.input	= "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+			  "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+			  "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+			  "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+			  "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+			  "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+			  "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+			  "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+			  "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+			  "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+			  "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+			  "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+			  "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+			  "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+			  "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+			  "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+			  "\x73\x79\x73\x74\x65\x6d\x73\x2e",
+		.output	= "LZ4 is lossless compression algorithm, providing"
+			 " compression speed at 400 MB/s per core, scalable "
+			 "with multi-cores CPU. It features an extremely fast "
+			 "decoder, with speed in multiple GB/s per core, "
+			 "typically reaching RAM speed limits on multi-core "
+			 "systems.",
 	},
 	},
 };
 };
 
 

+ 1 - 1
drivers/android/binder.c

@@ -3342,7 +3342,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 }
 
 
-static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int binder_vm_fault(struct vm_fault *vmf)
 {
 {
 	return VM_FAULT_SIGBUS;
 	return VM_FAULT_SIGBUS;
 }
 }
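
Most of the driver updates that follow are the same mechanical conversion seen here: ->fault() no longer receives the VMA as a separate argument, it is reached through vmf->vma. A minimal sketch of a handler under the new signature (hypothetical driver; example_pfn() stands in for whatever lookup the driver performs):

static int example_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;	/* formerly a separate parameter */
	int ret;

	ret = vm_insert_pfn(vma, vmf->address, example_pfn(vmf->pgoff));
	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}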

+ 5 - 0
drivers/base/core.c

@@ -638,6 +638,11 @@ int lock_device_hotplug_sysfs(void)
 	return restart_syscall();
 	return restart_syscall();
 }
 }
 
 
+void assert_held_device_hotplug(void)
+{
+	lockdep_assert_held(&device_hotplug_lock);
+}
+
 #ifdef CONFIG_BLOCK
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 static inline int device_is_not_partition(struct device *dev)
 {
 {

+ 3 - 2
drivers/base/dma-contiguous.c

@@ -181,6 +181,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  * @dev:   Pointer to device for which the allocation is performed.
  * @dev:   Pointer to device for which the allocation is performed.
  * @count: Requested number of pages.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
  * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP flags to use for this allocation.
  *
  *
  * This function allocates memory buffer for specified device. It uses
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
  * device specific contiguous memory area if available or the default
@@ -188,12 +189,12 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  * function.
  * function.
  */
  */
 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-				       unsigned int align)
+				       unsigned int align, gfp_t gfp_mask)
 {
 {
 	if (align > CONFIG_CMA_ALIGNMENT)
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 		align = CONFIG_CMA_ALIGNMENT;
 
 
-	return cma_alloc(dev_get_cma_area(dev), count, align);
+	return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
 }
 }
 
 
 /**
 /**

+ 1 - 1
drivers/base/memory.c

@@ -249,7 +249,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 	return ret;
 	return ret;
 }
 }
 
 
-int memory_block_change_state(struct memory_block *mem,
+static int memory_block_change_state(struct memory_block *mem,
 		unsigned long to_state, unsigned long from_state_req)
 		unsigned long to_state, unsigned long from_state_req)
 {
 {
 	int ret = 0;
 	int ret = 0;

+ 63 - 64
drivers/block/zram/zram_drv.c

@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
 	meta->table[index].value &= ~BIT(flag);
 	meta->table[index].value &= ~BIT(flag);
 }
 }
 
 
+static inline void zram_set_element(struct zram_meta *meta, u32 index,
+			unsigned long element)
+{
+	meta->table[index].element = element;
+}
+
+static inline void zram_clear_element(struct zram_meta *meta, u32 index)
+{
+	meta->table[index].element = 0;
+}
+
 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
 {
 {
 	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
 	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram,
 	} while (old_max != cur_max);
 	} while (old_max != cur_max);
 }
 }
 
 
-static bool page_zero_filled(void *ptr)
+static inline void zram_fill_page(char *ptr, unsigned long len,
+					unsigned long value)
+{
+	int i;
+	unsigned long *page = (unsigned long *)ptr;
+
+	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
+
+	if (likely(value == 0)) {
+		memset(ptr, 0, len);
+	} else {
+		for (i = 0; i < len / sizeof(*page); i++)
+			page[i] = value;
+	}
+}
+
+static bool page_same_filled(void *ptr, unsigned long *element)
 {
 {
 	unsigned int pos;
 	unsigned int pos;
 	unsigned long *page;
 	unsigned long *page;
 
 
 	page = (unsigned long *)ptr;
 	page = (unsigned long *)ptr;
 
 
-	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
-		if (page[pos])
+	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
+		if (page[pos] != page[pos + 1])
 			return false;
 			return false;
 	}
 	}
 
 
+	*element = page[pos];
+
 	return true;
 	return true;
 }
 }
 
 
-static void handle_zero_page(struct bio_vec *bvec)
+static void handle_same_page(struct bio_vec *bvec, unsigned long element)
 {
 {
 	struct page *page = bvec->bv_page;
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 	void *user_mem;
 
 
 	user_mem = kmap_atomic(page);
 	user_mem = kmap_atomic(page);
-	if (is_partial_io(bvec))
-		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	else
-		clear_page(user_mem);
+	zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
 	kunmap_atomic(user_mem);
 	kunmap_atomic(user_mem);
 
 
 	flush_dcache_page(page);
 	flush_dcache_page(page);
@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev,
 			mem_used << PAGE_SHIFT,
 			mem_used << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.zero_pages),
+			(u64)atomic64_read(&zram->stats.same_pages),
 			pool_stats.pages_compacted);
 			pool_stats.pages_compacted);
 	up_read(&zram->init_lock);
 	up_read(&zram->init_lock);
 
 
@@ -391,18 +417,6 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
 
-static inline bool zram_meta_get(struct zram *zram)
-{
-	if (atomic_inc_not_zero(&zram->refcount))
-		return true;
-	return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
-{
-	atomic_dec(&zram->refcount);
-}
-
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 {
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
 	size_t num_pages = disksize >> PAGE_SHIFT;
@@ -411,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 	/* Free all pages that are still in this zram device */
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < num_pages; index++) {
 	for (index = 0; index < num_pages; index++) {
 		unsigned long handle = meta->table[index].handle;
 		unsigned long handle = meta->table[index].handle;
-
-		if (!handle)
+		/*
+		 * No memory is allocated for same element filled pages.
+		 * Simply clear same page flag.
+		 */
+		if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
 			continue;
 			continue;
 
 
 		zs_free(meta->mem_pool, handle);
 		zs_free(meta->mem_pool, handle);
@@ -462,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index)
 	struct zram_meta *meta = zram->meta;
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle = meta->table[index].handle;
 	unsigned long handle = meta->table[index].handle;
 
 
-	if (unlikely(!handle)) {
-		/*
-		 * No memory is allocated for zero filled pages.
-		 * Simply clear zero page flag.
-		 */
-		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
-			zram_clear_flag(meta, index, ZRAM_ZERO);
-			atomic64_dec(&zram->stats.zero_pages);
-		}
+	/*
+	 * No memory is allocated for same element filled pages.
+	 * Simply clear same page flag.
+	 */
+	if (zram_test_flag(meta, index, ZRAM_SAME)) {
+		zram_clear_flag(meta, index, ZRAM_SAME);
+		zram_clear_element(meta, index);
+		atomic64_dec(&zram->stats.same_pages);
 		return;
 		return;
 	}
 	}
 
 
+	if (!handle)
+		return;
+
 	zs_free(meta->mem_pool, handle);
 	zs_free(meta->mem_pool, handle);
 
 
 	atomic64_sub(zram_get_obj_size(meta, index),
 	atomic64_sub(zram_get_obj_size(meta, index),
@@ -496,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	handle = meta->table[index].handle;
 	handle = meta->table[index].handle;
 	size = zram_get_obj_size(meta, index);
 	size = zram_get_obj_size(meta, index);
 
 
-	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+	if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		clear_page(mem);
+		zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
 		return 0;
 		return 0;
 	}
 	}
 
 
@@ -534,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 
 
 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	if (unlikely(!meta->table[index].handle) ||
 	if (unlikely(!meta->table[index].handle) ||
-			zram_test_flag(meta, index, ZRAM_ZERO)) {
+			zram_test_flag(meta, index, ZRAM_SAME)) {
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		handle_zero_page(bvec);
+		handle_same_page(bvec, meta->table[index].element);
 		return 0;
 		return 0;
 	}
 	}
 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -584,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	struct zram_meta *meta = zram->meta;
 	struct zram_meta *meta = zram->meta;
 	struct zcomp_strm *zstrm = NULL;
 	struct zcomp_strm *zstrm = NULL;
 	unsigned long alloced_pages;
 	unsigned long alloced_pages;
+	unsigned long element;
 
 
 	page = bvec->bv_page;
 	page = bvec->bv_page;
 	if (is_partial_io(bvec)) {
 	if (is_partial_io(bvec)) {
@@ -612,16 +632,17 @@ compress_again:
 		uncmem = user_mem;
 		uncmem = user_mem;
 	}
 	}
 
 
-	if (page_zero_filled(uncmem)) {
+	if (page_same_filled(uncmem, &element)) {
 		if (user_mem)
 		if (user_mem)
 			kunmap_atomic(user_mem);
 			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
 		/* Free memory associated with this sector now. */
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
 		zram_free_page(zram, index);
-		zram_set_flag(meta, index, ZRAM_ZERO);
+		zram_set_flag(meta, index, ZRAM_SAME);
+		zram_set_element(meta, index, element);
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 
-		atomic64_inc(&zram->stats.zero_pages);
+		atomic64_inc(&zram->stats.same_pages);
 		ret = 0;
 		ret = 0;
 		goto out;
 		goto out;
 	}
 	}
@@ -859,22 +880,17 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 {
 	struct zram *zram = queue->queuedata;
 	struct zram *zram = queue->queuedata;
 
 
-	if (unlikely(!zram_meta_get(zram)))
-		goto error;
-
 	blk_queue_split(queue, &bio, queue->bio_split);
 	blk_queue_split(queue, &bio, queue->bio_split);
 
 
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
 		atomic64_inc(&zram->stats.invalid_io);
-		goto put_zram;
+		goto error;
 	}
 	}
 
 
 	__zram_make_request(zram, bio);
 	__zram_make_request(zram, bio);
-	zram_meta_put(zram);
 	return BLK_QC_T_NONE;
 	return BLK_QC_T_NONE;
-put_zram:
-	zram_meta_put(zram);
+
 error:
 error:
 	bio_io_error(bio);
 	bio_io_error(bio);
 	return BLK_QC_T_NONE;
 	return BLK_QC_T_NONE;
@@ -904,13 +920,11 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	struct bio_vec bv;
 	struct bio_vec bv;
 
 
 	zram = bdev->bd_disk->private_data;
 	zram = bdev->bd_disk->private_data;
-	if (unlikely(!zram_meta_get(zram)))
-		goto out;
 
 
 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
 		atomic64_inc(&zram->stats.invalid_io);
 		atomic64_inc(&zram->stats.invalid_io);
 		err = -EINVAL;
 		err = -EINVAL;
-		goto put_zram;
+		goto out;
 	}
 	}
 
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
@@ -921,8 +935,6 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_offset = 0;
 	bv.bv_offset = 0;
 
 
 	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
 	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
-put_zram:
-	zram_meta_put(zram);
 out:
 out:
 	/*
 	/*
 	 * If I/O fails, just return error(ie, non-zero) without
 	 * If I/O fails, just return error(ie, non-zero) without
@@ -955,17 +967,6 @@ static void zram_reset_device(struct zram *zram)
 	meta = zram->meta;
 	meta = zram->meta;
 	comp = zram->comp;
 	comp = zram->comp;
 	disksize = zram->disksize;
 	disksize = zram->disksize;
-	/*
-	 * Refcount will go down to 0 eventually and r/w handler
-	 * cannot handle further I/O so it will bail out by
-	 * check zram_meta_get.
-	 */
-	zram_meta_put(zram);
-	/*
-	 * We want to free zram_meta in process context to avoid
-	 * deadlock between reclaim path and any other locks.
-	 */
-	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
 
 
 	/* Reset stats */
 	/* Reset stats */
 	memset(&zram->stats, 0, sizeof(zram->stats));
 	memset(&zram->stats, 0, sizeof(zram->stats));
@@ -1013,8 +1014,6 @@ static ssize_t disksize_store(struct device *dev,
 		goto out_destroy_comp;
 		goto out_destroy_comp;
 	}
 	}
 
 
-	init_waitqueue_head(&zram->io_done);
-	atomic_set(&zram->refcount, 1);
 	zram->meta = meta;
 	zram->meta = meta;
 	zram->comp = comp;
 	zram->comp = comp;
 	zram->disksize = disksize;
 	zram->disksize = disksize;
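
The heart of the zram change is page_same_filled() above: rather than special-casing only all-zero pages, any page whose machine words are all identical is recorded as that single word. A standalone illustration of the same check (plain userspace C, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static bool page_same_filled(const void *ptr, unsigned long *element)
{
	const unsigned long *page = ptr;
	unsigned long pos;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++)
		if (page[pos] != page[pos + 1])
			return false;

	*element = page[pos];	/* the one word worth remembering */
	return true;
}

int main(void)
{
	unsigned long buf[PAGE_SIZE / sizeof(unsigned long)], element = 0;

	memset(buf, 0xab, sizeof(buf));	/* every word becomes 0xabab...ab */
	printf("same filled: %d\n", page_same_filled(buf, &element));
	return 0;
}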

+ 6 - 6
drivers/block/zram/zram_drv.h

@@ -61,7 +61,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 /* Flags for zram pages (table[page_no].value) */
 /* Flags for zram pages (table[page_no].value) */
 enum zram_pageflags {
 enum zram_pageflags {
 	/* Page consists entirely of zeros */
 	/* Page consists entirely of zeros */
-	ZRAM_ZERO = ZRAM_FLAG_SHIFT,
+	ZRAM_SAME = ZRAM_FLAG_SHIFT,
 	ZRAM_ACCESS,	/* page is now accessed */
 	ZRAM_ACCESS,	/* page is now accessed */
 
 
 	__NR_ZRAM_PAGEFLAGS,
 	__NR_ZRAM_PAGEFLAGS,
@@ -71,7 +71,10 @@ enum zram_pageflags {
 
 
 /* Allocated for each disk page */
 /* Allocated for each disk page */
 struct zram_table_entry {
 struct zram_table_entry {
-	unsigned long handle;
+	union {
+		unsigned long handle;
+		unsigned long element;
+	};
 	unsigned long value;
 	unsigned long value;
 };
 };
 
 
@@ -83,7 +86,7 @@ struct zram_stats {
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
-	atomic64_t zero_pages;		/* no. of zero filled pages */
+	atomic64_t same_pages;		/* no. of same element filled pages */
 	atomic64_t pages_stored;	/* no. of pages currently stored */
 	atomic64_t pages_stored;	/* no. of pages currently stored */
 	atomic_long_t max_used_pages;	/* no. of maximum pages stored */
 	atomic_long_t max_used_pages;	/* no. of maximum pages stored */
 	atomic64_t writestall;		/* no. of write slow paths */
 	atomic64_t writestall;		/* no. of write slow paths */
@@ -106,9 +109,6 @@ struct zram {
 	unsigned long limit_pages;
 	unsigned long limit_pages;
 
 
 	struct zram_stats stats;
 	struct zram_stats stats;
-	atomic_t refcount; /* refcount for zram_meta */
-	/* wait all IO under all of cpu are done */
-	wait_queue_head_t io_done;
 	/*
 	/*
 	 * This is the limit on amount of *uncompressed* worth of data
 	 * This is the limit on amount of *uncompressed* worth of data
 	 * we can store in a disk.
 	 * we can store in a disk.

+ 2 - 3
drivers/char/agp/alpha-agp.c

@@ -11,15 +11,14 @@
 
 
 #include "agp.h"
 #include "agp.h"
 
 
-static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
-					struct vm_fault *vmf)
+static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
 {
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	dma_addr_t dma_addr;
 	dma_addr_t dma_addr;
 	unsigned long pa;
 	unsigned long pa;
 	struct page *page;
 	struct page *page;
 
 
-	dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
+	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
 	pa = agp->ops->translate(agp, dma_addr);
 
 
 	if (pa == (unsigned long)-EINVAL)
 	if (pa == (unsigned long)-EINVAL)

+ 3 - 3
drivers/char/mspec.c

@@ -191,12 +191,12 @@ mspec_close(struct vm_area_struct *vma)
  * Creates a mspec page and maps it to user space.
  * Creates a mspec page and maps it to user space.
  */
  */
 static int
 static int
-mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+mspec_fault(struct vm_fault *vmf)
 {
 {
 	unsigned long paddr, maddr;
 	unsigned long paddr, maddr;
 	unsigned long pfn;
 	unsigned long pfn;
 	pgoff_t index = vmf->pgoff;
 	pgoff_t index = vmf->pgoff;
-	struct vma_data *vdata = vma->vm_private_data;
+	struct vma_data *vdata = vmf->vma->vm_private_data;
 
 
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
 	if (maddr == 0) {
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * be because another thread has installed the pte first, so it
 	 * be because another thread has installed the pte first, so it
 	 * is no problem.
 	 * is no problem.
 	 */
 	 */
-	vm_insert_pfn(vma, vmf->address, pfn);
+	vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 
 	return VM_FAULT_NOPAGE;
 	return VM_FAULT_NOPAGE;
 }
 }

+ 69 - 23
drivers/dax/dax.c

@@ -419,8 +419,7 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
 	return -1;
 	return -1;
 }
 }
 
 
-static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
-		struct vm_fault *vmf)
+static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 {
 {
 	struct device *dev = &dax_dev->dev;
 	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
 	struct dax_region *dax_region;
@@ -428,7 +427,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	phys_addr_t phys;
 	phys_addr_t phys;
 	pfn_t pfn;
 	pfn_t pfn;
 
 
-	if (check_vma(dax_dev, vma, __func__))
+	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
 		return VM_FAULT_SIGBUS;
 
 
 	dax_region = dax_dev->region;
 	dax_region = dax_dev->region;
@@ -446,7 +445,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
 
-	rc = vm_insert_mixed(vma, vmf->address, pfn);
+	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
 
 
 	if (rc == -ENOMEM)
 	if (rc == -ENOMEM)
 		return VM_FAULT_OOM;
 		return VM_FAULT_OOM;
@@ -456,22 +455,6 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 	return VM_FAULT_NOPAGE;
 }
 }
 
 
-static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	int rc;
-	struct file *filp = vma->vm_file;
-	struct dax_dev *dax_dev = filp->private_data;
-
-	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
-			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
-			? "write" : "read", vma->vm_start, vma->vm_end);
-	rcu_read_lock();
-	rc = __dax_dev_fault(dax_dev, vma, vmf);
-	rcu_read_unlock();
-
-	return rc;
-}
-
 static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 {
 {
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
@@ -510,7 +493,53 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 			vmf->flags & FAULT_FLAG_WRITE);
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 }
 
 
-static int dax_dev_pmd_fault(struct vm_fault *vmf)
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
+{
+	unsigned long pud_addr = vmf->address & PUD_MASK;
+	struct device *dev = &dax_dev->dev;
+	struct dax_region *dax_region;
+	phys_addr_t phys;
+	pgoff_t pgoff;
+	pfn_t pfn;
+
+	if (check_vma(dax_dev, vmf->vma, __func__))
+		return VM_FAULT_SIGBUS;
+
+	dax_region = dax_dev->region;
+	if (dax_region->align > PUD_SIZE) {
+		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	/* dax pud mappings require pfn_t_devmap() */
+	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	pgoff = linear_page_index(vmf->vma, pud_addr);
+	phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
+	if (phys == -1) {
+		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+				pgoff);
+		return VM_FAULT_SIGBUS;
+	}
+
+	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+			vmf->flags & FAULT_FLAG_WRITE);
+}
+#else
+static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
+{
+	return VM_FAULT_FALLBACK;
+}
+#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+static int dax_dev_huge_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size)
 {
 {
 	int rc;
 	int rc;
 	struct file *filp = vmf->vma->vm_file;
 	struct file *filp = vmf->vma->vm_file;
@@ -522,15 +551,32 @@ static int dax_dev_pmd_fault(struct vm_fault *vmf)
 			vmf->vma->vm_start, vmf->vma->vm_end);
 			vmf->vma->vm_start, vmf->vma->vm_end);
 
 
 	rcu_read_lock();
 	rcu_read_lock();
-	rc = __dax_dev_pmd_fault(dax_dev, vmf);
+	switch (pe_size) {
+	case PE_SIZE_PTE:
+		rc = __dax_dev_pte_fault(dax_dev, vmf);
+		break;
+	case PE_SIZE_PMD:
+		rc = __dax_dev_pmd_fault(dax_dev, vmf);
+		break;
+	case PE_SIZE_PUD:
+		rc = __dax_dev_pud_fault(dax_dev, vmf);
+		break;
+	default:
+		return VM_FAULT_FALLBACK;
+	}
 	rcu_read_unlock();
 	rcu_read_unlock();
 
 
 	return rc;
 	return rc;
 }
 }
 
 
+static int dax_dev_fault(struct vm_fault *vmf)
+{
+	return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
+}
+
 static const struct vm_operations_struct dax_dev_vm_ops = {
 static const struct vm_operations_struct dax_dev_vm_ops = {
 	.fault = dax_dev_fault,
 	.fault = dax_dev_fault,
-	.pmd_fault = dax_dev_pmd_fault,
+	.huge_fault = dax_dev_huge_fault,
 };
 };
 
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
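
The dax conversion above is the template for the new ->huge_fault() entry point: one handler receives the page_entry_size and dispatches on it, while ordinary PTE faults still arrive through ->fault(). A sketch of the wiring (hypothetical driver, handlers reduced to their return values):

static int example_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return VM_FAULT_SIGBUS;		/* a real driver maps one page here */
	case PE_SIZE_PMD:
	case PE_SIZE_PUD:
		return VM_FAULT_FALLBACK;	/* or install a huge mapping */
	default:
		return VM_FAULT_FALLBACK;
	}
}

static int example_pte_fault(struct vm_fault *vmf)
{
	return example_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= example_pte_fault,
	.huge_fault	= example_huge_fault,
};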

+ 5 - 4
drivers/gpu/drm/armada/armada_gem.c

@@ -14,14 +14,15 @@
 #include <drm/armada_drm.h>
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 #include "armada_ioctlP.h"
 
 
-static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int armada_gem_vm_fault(struct vm_fault *vmf)
 {
 {
-	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
+	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	int ret;
 	int ret;
 
 
-	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	ret = vm_insert_pfn(vma, vmf->address, pfn);
+	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 
 	switch (ret) {
 	switch (ret) {
 	case 0:
 	case 0:

+ 9 - 27
drivers/gpu/drm/drm_vm.c

@@ -96,8 +96,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  * map, get the page, increment the use count and return it.
  */
  */
 #if IS_ENABLED(CONFIG_AGP)
 #if IS_ENABLED(CONFIG_AGP)
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_local_map *map = NULL;
 	struct drm_local_map *map = NULL;
@@ -168,7 +169,7 @@ vm_fault_error:
 	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 }
 #else
 #else
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
 {
 {
 	return VM_FAULT_SIGBUS;
 	return VM_FAULT_SIGBUS;
 }
 }
@@ -184,8 +185,9 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  * Get the mapping, find the real physical page to map, get the page, and
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  * return it.
  */
  */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_shm_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	struct drm_local_map *map = vma->vm_private_data;
 	unsigned long offset;
 	unsigned long offset;
 	unsigned long i;
 	unsigned long i;
@@ -280,14 +282,14 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 /**
 /**
  * \c fault method for DMA virtual memory.
  * \c fault method for DMA virtual memory.
  *
  *
- * \param vma virtual memory area.
  * \param address access address.
  * \param address access address.
  * \return pointer to the page structure.
  * \return pointer to the page structure.
  *
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
  */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_dma_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_device_dma *dma = dev->dma;
@@ -315,14 +317,14 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 /**
 /**
  * \c fault method for scatter-gather virtual memory.
  * \c fault method for scatter-gather virtual memory.
  *
  *
- * \param vma virtual memory area.
  * \param address access address.
  * \param address access address.
  * \return pointer to the page structure.
  * \return pointer to the page structure.
  *
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
  */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_sg_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	struct drm_local_map *map = vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device *dev = priv->minor->dev;
@@ -347,26 +349,6 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return 0;
 	return 0;
 }
 }
 
 
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	return drm_do_vm_fault(vma, vmf);
-}
-
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	return drm_do_vm_shm_fault(vma, vmf);
-}
-
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	return drm_do_vm_dma_fault(vma, vmf);
-}
-
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	return drm_do_vm_sg_fault(vma, vmf);
-}
-
 /** AGP virtual memory operations */
 /** AGP virtual memory operations */
 static const struct vm_operations_struct drm_vm_ops = {
 static const struct vm_operations_struct drm_vm_ops = {
 	.fault = drm_vm_fault,
 	.fault = drm_vm_fault,

+ 1 - 1
drivers/gpu/drm/etnaviv/etnaviv_drv.h

@@ -73,7 +73,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 		struct drm_file *file);
 
 
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);

+ 2 - 1
drivers/gpu/drm/etnaviv/etnaviv_gem.c

@@ -175,8 +175,9 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return obj->ops->mmap(obj, vma);
 	return obj->ops->mmap(obj, vma);
 }
 }
 
 
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int etnaviv_gem_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct page **pages, *page;
 	struct page **pages, *page;

+ 2 - 1
drivers/gpu/drm/exynos/exynos_drm_gem.c

@@ -447,8 +447,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 	return ret;
 	return ret;
 }
 }
 
 
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int exynos_drm_gem_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	unsigned long pfn;
 	unsigned long pfn;

+ 1 - 1
drivers/gpu/drm/exynos/exynos_drm_gem.h

@@ -116,7 +116,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 				   uint64_t *offset);
 				   uint64_t *offset);
 
 
 /* page fault handler and mmap fault address(virtual) to physical memory. */
 /* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int exynos_drm_gem_fault(struct vm_fault *vmf);
 
 
 /* set vm_flags and we can change the vm attribute to other one at here. */
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

+ 2 - 1
drivers/gpu/drm/gma500/framebuffer.c

@@ -111,8 +111,9 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
         return 0;
         return 0;
 }
 }
 
 
-static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int psbfb_vm_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct psb_framebuffer *psbfb = vma->vm_private_data;
 	struct psb_framebuffer *psbfb = vma->vm_private_data;
 	struct drm_device *dev = psbfb->base.dev;
 	struct drm_device *dev = psbfb->base.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct drm_psb_private *dev_priv = dev->dev_private;

+ 2 - 1
drivers/gpu/drm/gma500/gem.c

@@ -164,8 +164,9 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  *	vma->vm_private_data points to the GEM object that is backing this
  *	vma->vm_private_data points to the GEM object that is backing this
  *	mapping.
  *	mapping.
  */
  */
-int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int psb_gem_fault(struct vm_fault *vmf)
 {
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj;
 	struct drm_gem_object *obj;
 	struct gtt_range *r;
 	struct gtt_range *r;
 	int ret;
 	int ret;

+ 1 - 1
drivers/gpu/drm/gma500/psb_drv.h

@@ -752,7 +752,7 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
 			struct drm_mode_create_dumb *args);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 			uint32_t handle, uint64_t *offset);
 			uint32_t handle, uint64_t *offset);
-extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_fault(struct vm_fault *vmf);
 
 
 /* psb_device.c */
 /* psb_device.c */
 extern const struct psb_ops psb_chip_ops;
 extern const struct psb_ops psb_chip_ops;

+ 1 - 1
drivers/gpu/drm/i915/i915_drv.h

@@ -3352,7 +3352,7 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 					unsigned int flags);
 					unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
 			 unsigned int flags,
 			 long timeout,
 			 long timeout,

+ 2 - 2
drivers/gpu/drm/i915/i915_gem.c

@@ -1772,7 +1772,6 @@ compute_partial_view(struct drm_i915_gem_object *obj,
 
 
 /**
 /**
  * i915_gem_fault - fault a page into the GTT
  * i915_gem_fault - fault a page into the GTT
- * @area: CPU VMA in question
  * @vmf: fault info
  * @vmf: fault info
  *
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -1789,9 +1788,10 @@ compute_partial_view(struct drm_i915_gem_object *obj,
  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
  */
-int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_fault *vmf)
 {
 {
 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+	struct vm_area_struct *area = vmf->vma;
 	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_private *dev_priv = to_i915(dev);

+ 1 - 1
drivers/gpu/drm/msm/msm_drv.h

@@ -206,7 +206,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 		uint64_t *iova);
 		uint64_t *iova);

+ 2 - 1
drivers/gpu/drm/msm/msm_gem.c

@@ -191,8 +191,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int msm_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;

+ 1 - 1
drivers/gpu/drm/omapdrm/omap_drv.h

@@ -188,7 +188,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int omap_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);

+ 2 - 2
drivers/gpu/drm/omapdrm/omap_gem.c

@@ -518,7 +518,6 @@ static int fault_2d(struct drm_gem_object *obj,
 
 /**
  * omap_gem_fault		-	pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
  * @vmf: fault detail
  *
  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
@@ -529,8 +528,9 @@ static int fault_2d(struct drm_gem_object *obj,
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int omap_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	struct drm_device *dev = obj->dev;

+ 3 - 3
drivers/gpu/drm/qxl/qxl_ttm.c

@@ -105,15 +105,15 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qxl_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
 

+ 3 - 3
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -979,19 +979,19 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int radeon_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct radeon_device *rdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL) {
 		return VM_FAULT_NOPAGE;
 	}
 	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	up_read(&rdev->pm.mclk_lock);
 	return r;
 }

+ 2 - 1
drivers/gpu/drm/tegra/gem.c

@@ -441,8 +441,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 	return 0;
 }
 
-static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tegra_bo_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *gem = vma->vm_private_data;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 	struct page *page;

+ 5 - 5
drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -43,7 +43,6 @@
 #define TTM_BO_VM_NUM_PREFAULT 16
 
 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
-				struct vm_area_struct *vma,
 				struct vm_fault *vmf)
 {
 	int ret = 0;
@@ -67,7 +66,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		ttm_bo_reference(bo);
-		up_read(&vma->vm_mm->mmap_sem);
+		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
 		ttm_bo_unreserve(bo);
 		ttm_bo_unref(&bo);
@@ -92,8 +91,9 @@ out_unlock:
 	return ret;
 }
 
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ttm_bo_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -124,7 +124,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_reference(bo);
-				up_read(&vma->vm_mm->mmap_sem);
+				up_read(&vmf->vma->vm_mm->mmap_sem);
 				(void) ttm_bo_wait_unreserved(bo);
 				ttm_bo_unref(&bo);
 			}
@@ -168,7 +168,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	ret = ttm_bo_vm_fault_idle(bo, vmf);
 	if (unlikely(ret != 0)) {
 		retval = ret;
 

+ 1 - 1
drivers/gpu/drm/udl/udl_drv.h

@@ -134,7 +134,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height);

+ 2 - 1
drivers/gpu/drm/udl/udl_gem.c

@@ -100,8 +100,9 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ret;
 }
 
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int udl_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
 	struct page *page;
 	unsigned int page_offset;

+ 2 - 1
drivers/gpu/drm/vgem/vgem_drv.c

@@ -50,8 +50,9 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 	kfree(vgem_obj);
 }
 
-static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vgem_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
 	unsigned long vaddr = vmf->address;

+ 3 - 4
drivers/gpu/drm/virtio/virtgpu_ttm.c

@@ -114,18 +114,17 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
 static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
-				struct vm_fault *vmf)
+static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct virtio_gpu_device *vgdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
 	vgdev = virtio_gpu_get_vgdev(bo->bdev);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
 #endif

+ 2 - 2
drivers/hsi/clients/cmt_speech.c

@@ -1098,9 +1098,9 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
 	kfree(hi);
 }
 
-static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cs_char_vma_fault(struct vm_fault *vmf)
 {
-	struct cs_char *csdata = vma->vm_private_data;
+	struct cs_char *csdata = vmf->vma->vm_private_data;
 	struct page *page;
 
 	page = virt_to_page(csdata->mmap_base);

+ 3 - 3
drivers/hwtracing/intel_th/msu.c

@@ -1188,9 +1188,9 @@ static void msc_mmap_close(struct vm_area_struct *vma)
 	mutex_unlock(&msc->buf_mutex);
 }
 
-static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int msc_mmap_fault(struct vm_fault *vmf)
 {
-	struct msc_iter *iter = vma->vm_file->private_data;
+	struct msc_iter *iter = vmf->vma->vm_file->private_data;
 	struct msc *msc = iter->msc;
 
 	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
@@ -1198,7 +1198,7 @@ static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	get_page(vmf->page);
-	vmf->page->mapping = vma->vm_file->f_mapping;
+	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
 	vmf->page->index = vmf->pgoff;
 
 	return 0;

+ 2 - 2
drivers/infiniband/hw/hfi1/file_ops.c

@@ -92,7 +92,7 @@ static unsigned int poll_next(struct file *, struct poll_table_struct *);
 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
-static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static int vma_fault(struct vm_fault *);
 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 			    unsigned long arg);
 
@@ -695,7 +695,7 @@ done:
  * Local (non-chip) user memory is not mapped right away but as it is
  * accessed by the user-level code.
  */
-static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vma_fault(struct vm_fault *vmf)
 {
 	struct page *page;
 

+ 1 - 1
drivers/infiniband/hw/qib/qib_file_ops.c

@@ -893,7 +893,7 @@ bail:
 /*
  * qib_file_vma_fault - handle a VMA page fault.
  */
-static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qib_file_vma_fault(struct vm_fault *vmf)
 {
 	struct page *page;
 

+ 1 - 1
drivers/iommu/amd_iommu.c

@@ -2672,7 +2672,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size));
+						 get_order(size), flag);
 		if (!page)
 			return NULL;
 	}

+ 1 - 1
drivers/iommu/intel-iommu.c

@@ -3829,7 +3829,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	if (gfpflags_allow_blocking(flags)) {
 		unsigned int count = size >> PAGE_SHIFT;
 
-		page = dma_alloc_from_contiguous(dev, count, order);
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
 		if (page && iommu_no_mapping(dev) &&
 		    page_to_phys(page) + size > dev->coherent_dma_mask) {
 			dma_release_from_contiguous(dev, page, count);
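
The two IOMMU hunks are a different interface change folded into the same series: dma_alloc_from_contiguous() grows a gfp_t argument so the caller's allocation flags reach the CMA allocator. A minimal sketch of the updated calling convention, assuming the post-change prototype (device, page count, alignment order, gfp flags); the example_* names are illustrative, not from this diff:

static void *example_alloc_cma(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned int count = size >> PAGE_SHIFT;
	struct page *page;

	/* The caller's gfp mask is now passed through as the fourth argument. */
	page = dma_alloc_from_contiguous(dev, count, get_order(size), gfp);
	if (!page)
		return NULL;

	return page_address(page);
}

The matching release helper, dma_release_from_contiguous(dev, page, count), keeps its existing three-argument form, as the unchanged context lines above show.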

+ 2 - 1
drivers/media/v4l2-core/videobuf-dma-sg.c

@@ -434,8 +434,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
  * now ...).  Bounce buffers don't work very well for the data rates
  * video capture has.
  */
-static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 
 	dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",

+ 2 - 1
drivers/misc/cxl/context.c

@@ -121,8 +121,9 @@ void cxl_context_set_mapping(struct cxl_context *ctx,
 	mutex_unlock(&ctx->mapping_lock);
 }
 
-static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxl_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct cxl_context *ctx = vma->vm_file->private_data;
 	u64 area, offset;
 

+ 2 - 1
drivers/misc/sgi-gru/grumain.c

@@ -926,8 +926,9 @@ again:
  *
  * 	Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries.
  */
-int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int gru_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct gru_thread_state *gts;
 	unsigned long paddr, vaddr;
 	unsigned long expires;

+ 1 - 1
drivers/misc/sgi-gru/grutables.h

@@ -665,7 +665,7 @@ extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
 		int cbr_au_count, char *cbmap);
 extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
 		int dsr_au_count, char *dsmap);
-extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
+extern int gru_fault(struct vm_fault *vmf);
 extern struct gru_mm_struct *gru_register_mmu_notifier(void);
 extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
 

+ 3 - 3
drivers/scsi/cxlflash/superpipe.c

@@ -1053,7 +1053,6 @@ out:
 
 /**
  * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vma:	VM area associated with mapping.
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
@@ -1067,8 +1066,9 @@ out:
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
-static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxlflash_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct file *file = vma->vm_file;
 	struct cxl_context *ctx = cxl_fops_get_context(file);
 	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
@@ -1097,7 +1097,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (likely(!ctxi->err_recovery_active)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
+		rc = ctxi->cxl_mmap_vmops->fault(vmf);
 	} else {
 		dev_dbg(dev, "%s: err recovery active, use err_page\n",
 			__func__);

+ 2 - 1
drivers/scsi/sg.c

@@ -1185,8 +1185,9 @@ sg_fasync(int fd, struct file *filp, int mode)
 }
 
 static int
-sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+sg_vma_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	Sg_fd *sfp;
 	unsigned long offset, len, sa;
 	Sg_scatter_hold *rsv_schp;

+ 3 - 3
drivers/staging/android/ion/ion.c

@@ -870,9 +870,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 	mutex_unlock(&buffer->lock);
 }
 
-static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_fault *vmf)
 {
-	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_buffer *buffer = vmf->vma->vm_private_data;
 	unsigned long pfn;
 	int ret;
 
@@ -881,7 +881,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
 	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-	ret = vm_insert_pfn(vma, vmf->address, pfn);
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 	mutex_unlock(&buffer->lock);
 	if (ret)
 		return VM_FAULT_ERROR;

+ 4 - 3
drivers/staging/lustre/lustre/llite/llite_mmap.c

@@ -321,7 +321,7 @@ out:
 	return fault_ret;
 }
 
-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_fault(struct vm_fault *vmf)
 {
 	int count = 0;
 	bool printed = false;
@@ -335,7 +335,7 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
 
 restart:
-	result = ll_fault0(vma, vmf);
+	result = ll_fault0(vmf->vma, vmf);
 	LASSERT(!(result & VM_FAULT_LOCKED));
 	if (result == 0) {
 		struct page *vmpage = vmf->page;
@@ -362,8 +362,9 @@ restart:
 	return result;
 }
 
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_page_mkwrite(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	int count = 0;
 	bool printed = false;
 	bool retry;

+ 1 - 1
drivers/staging/lustre/lustre/llite/vvp_io.c

@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 {
 	struct vm_fault *vmf = cfio->ft_vmf;
 
-	cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+	cfio->ft_flags = filemap_fault(vmf);
 	cfio->ft_flags_valid = 1;
 
 	if (vmf->page) {

+ 3 - 3
drivers/target/target_core_user.c

@@ -783,15 +783,15 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }
 
-static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tcmu_vma_fault(struct vm_fault *vmf)
 {
-	struct tcmu_dev *udev = vma->vm_private_data;
+	struct tcmu_dev *udev = vmf->vma->vm_private_data;
 	struct uio_info *info = &udev->uio_info;
 	struct page *page;
 	unsigned long offset;
 	void *addr;
 
-	int mi = tcmu_find_mem_index(vma);
+	int mi = tcmu_find_mem_index(vmf->vma);
 	if (mi < 0)
 		return VM_FAULT_SIGBUS;
 

+ 3 - 3
drivers/uio/uio.c

@@ -597,14 +597,14 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }
 
-static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int uio_vma_fault(struct vm_fault *vmf)
 {
-	struct uio_device *idev = vma->vm_private_data;
+	struct uio_device *idev = vmf->vma->vm_private_data;
 	struct page *page;
 	unsigned long offset;
 	void *addr;
 
-	int mi = uio_find_mem_index(vma);
+	int mi = uio_find_mem_index(vmf->vma);
 	if (mi < 0)
 		return VM_FAULT_SIGBUS;
 

+ 2 - 2
drivers/usb/mon/mon_bin.c

@@ -1223,9 +1223,9 @@ static void mon_bin_vma_close(struct vm_area_struct *vma)
 /*
  * Map ring pages to user space.
  */
-static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int mon_bin_vma_fault(struct vm_fault *vmf)
 {
-	struct mon_reader_bin *rp = vma->vm_private_data;
+	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
 	unsigned long offset, chunk_idx;
 	struct page *pageptr;
 

+ 7 - 9
drivers/video/fbdev/core/fb_defio.c

@@ -37,12 +37,11 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
 }
 
 /* this is to find and return the vmalloc-ed fb pages */
-static int fb_deferred_io_fault(struct vm_area_struct *vma,
-				struct vm_fault *vmf)
+static int fb_deferred_io_fault(struct vm_fault *vmf)
 {
 	unsigned long offset;
 	struct page *page;
-	struct fb_info *info = vma->vm_private_data;
+	struct fb_info *info = vmf->vma->vm_private_data;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
 	if (offset >= info->fix.smem_len)
@@ -54,8 +53,8 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
 
 	get_page(page);
 
-	if (vma->vm_file)
-		page->mapping = vma->vm_file->f_mapping;
+	if (vmf->vma->vm_file)
+		page->mapping = vmf->vma->vm_file->f_mapping;
 	else
 		printk(KERN_ERR "no mapping available\n");
 
@@ -91,11 +90,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
 /* vm_ops->page_mkwrite handler */
-static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
-				  struct vm_fault *vmf)
+static int fb_deferred_io_mkwrite(struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
-	struct fb_info *info = vma->vm_private_data;
+	struct fb_info *info = vmf->vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct page *cur;
 
@@ -105,7 +103,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 	deferred framebuffer IO. then if userspace touches a page
 	again, we repeat the same scheme */
 
-	file_update_time(vma->vm_file);
+	file_update_time(vmf->vma->vm_file);
 
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);

Some files were not shown because too many files changed in this diff