Эх сурвалжийг харах

mm: mlock: add new mlock system call

With the refactored mlock code, introduce a new system call for mlock.
The new call will allow the user to specify what lock states are being
added.  mlock2 is trivial at the moment, but a follow on patch will add a
new mlock state making it useful.

Signed-off-by: Eric B Munson <emunson@akamai.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Shuah Khan <shuahkh@osg.samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Eric B Munson 9 жил өмнө
parent
commit
a8ca5d0ecb

+ 1 - 0
arch/x86/entry/syscalls/syscall_32.tbl

@@ -382,3 +382,4 @@
 373	i386	shutdown		sys_shutdown
 374	i386	userfaultfd		sys_userfaultfd
 375	i386	membarrier		sys_membarrier
+376	i386	mlock2			sys_mlock2

+ 1 - 0
arch/x86/entry/syscalls/syscall_64.tbl

@@ -331,6 +331,7 @@
 322	64	execveat		stub_execveat
 323	common	userfaultfd		sys_userfaultfd
 324	common	membarrier		sys_membarrier
+325	common	mlock2			sys_mlock2
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact

+ 2 - 0
include/linux/syscalls.h

@@ -887,4 +887,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
 
 asmlinkage long sys_membarrier(int cmd, int flags);
 
+asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
+
 #endif

+ 3 - 1
include/uapi/asm-generic/unistd.h

@@ -713,9 +713,11 @@ __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 283
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 284
+__SYSCALL(__NR_mlock2, sys_mlock2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 284
+#define __NR_syscalls 285
 
 /*
  * All syscalls below here should go away really,

+ 1 - 0
kernel/sys_ni.c

@@ -194,6 +194,7 @@ cond_syscall(sys_mlock);
 cond_syscall(sys_munlock);
 cond_syscall(sys_mlockall);
 cond_syscall(sys_munlockall);
+cond_syscall(sys_mlock2);
 cond_syscall(sys_mincore);
 cond_syscall(sys_madvise);
 cond_syscall(sys_mremap);

+ 8 - 0
mm/mlock.c

@@ -644,6 +644,14 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 	return do_mlock(start, len, VM_LOCKED);
 }
 
+SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
+{
+	if (flags)
+		return -EINVAL;
+
+	return do_mlock(start, len, VM_LOCKED);
+}
+
 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 {
 	int ret;