umh.c

/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/shmem_fs.h>
#include <linux/pipe_fs_i.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
        struct completion *comp = xchg(&sub_info->complete, NULL);
        /*
         * See call_usermodehelper_exec(). If xchg() returns NULL
         * we own sub_info, the UMH_KILLABLE caller has gone away
         * or the caller used UMH_NO_WAIT.
         */
        if (comp)
                complete(comp);
        else
                call_usermodehelper_freeinfo(sub_info);
}
/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
        struct subprocess_info *sub_info = data;
        struct cred *new;
        int retval;

        spin_lock_irq(&current->sighand->siglock);
        flush_signal_handlers(current, 1);
        spin_unlock_irq(&current->sighand->siglock);

        /*
         * Our parent (unbound workqueue) runs with elevated scheduling
         * priority. Avoid propagating that into the userspace child.
         */
        set_user_nice(current, 0);

        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
                goto out;

        spin_lock(&umh_sysctl_lock);
        new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
        new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);

        if (sub_info->init) {
                retval = sub_info->init(sub_info, new);
                if (retval) {
                        abort_creds(new);
                        goto out;
                }
        }

        commit_creds(new);

        sub_info->pid = task_pid_nr(current);
        if (sub_info->file)
                retval = do_execve_file(sub_info->file,
                                        sub_info->argv, sub_info->envp);
        else
                retval = do_execve(getname_kernel(sub_info->path),
                                   (const char __user *const __user *)sub_info->argv,
                                   (const char __user *const __user *)sub_info->envp);
out:
        sub_info->retval = retval;
        /*
         * call_usermodehelper_exec_sync() will call umh_complete
         * if UMH_WAIT_PROC.
         */
        if (!(sub_info->wait & UMH_WAIT_PROC))
                umh_complete(sub_info);
        if (!retval)
                return 0;
        do_exit(0);
}
/* Handles UMH_WAIT_PROC. */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
        pid_t pid;

        /* If SIGCLD is ignored kernel_wait4 won't populate the status. */
        kernel_sigaction(SIGCHLD, SIG_DFL);
        pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
        if (pid < 0) {
                sub_info->retval = pid;
        } else {
                int ret = -ECHILD;
                /*
                 * Normally it is bogus to call wait4() from in-kernel because
                 * wait4() wants to write the exit code to a userspace address.
                 * But call_usermodehelper_exec_sync() always runs as a kernel
                 * thread (workqueue) and put_user() to a kernel address works
                 * OK for kernel threads, due to their having an mm_segment_t
                 * which spans the entire address space.
                 *
                 * Thus the __user pointer cast is valid here.
                 */
                kernel_wait4(pid, (int __user *)&ret, 0, NULL);

                /*
                 * If ret is 0, either call_usermodehelper_exec_async failed and
                 * the real error code is already in sub_info->retval or
                 * sub_info->retval is 0 anyway, so don't mess with it then.
                 */
                if (ret)
                        sub_info->retval = ret;
        }

        /* Restore default kernel sig handler */
        kernel_sigaction(SIGCHLD, SIG_IGN);

        umh_complete(sub_info);
}
/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that it
 * inherits the widest possible affinity, irrespective of call_usermodehelper()
 * callers with possibly reduced affinity (eg: per-cpu workqueues). We don't
 * want usermodehelper targets to contend with a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);

        if (sub_info->wait & UMH_WAIT_PROC) {
                call_usermodehelper_exec_sync(sub_info);
        } else {
                pid_t pid;
                /*
                 * Use CLONE_PARENT to reparent it to kthreadd; we do not
                 * want to pollute current->children, and we need a parent
                 * that always ignores SIGCHLD to ensure auto-reaping.
                 */
                pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
                                    CLONE_PARENT | SIGCHLD);
                if (pid < 0) {
                        sub_info->retval = pid;
                        umh_complete(sub_info);
                }
        }
}
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_INTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                if (usermodehelper_disabled == UMH_DISABLED)
                        ret = -EAGAIN;

                up_read(&umhelper_sem);

                if (ret)
                        break;

                schedule();
                try_to_freeze();

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
        DEFINE_WAIT(wait);

        if (timeout < 0)
                return -EINVAL;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                up_read(&umhelper_sem);

                timeout = schedule_timeout(timeout);
                if (!timeout)
                        break;

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
        up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
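
/*
 * Illustrative sketch (not part of the original file): callers that must
 * coordinate with usermodehelper_disable(), e.g. helpers fired around
 * suspend/hibernation, typically bracket the request with the read lock so
 * they either run the helper or fail fast while helpers are disabled.
 * example_try_helper() and its argument are hypothetical.
 */
static int example_try_helper(struct subprocess_info *info)
{
        int ret;

        ret = usermodehelper_read_trylock();
        if (ret)
                return ret;	/* helpers currently disabled, e.g. -EAGAIN */

        ret = call_usermodehelper_exec(info, UMH_WAIT_EXEC);
        usermodehelper_read_unlock();
        return ret;
}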
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        wake_up(&usermodehelper_disabled_waitq);
        up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
        long retval;

        if (!depth)
                return -EINVAL;

        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        up_write(&umhelper_sem);

        /*
         * From now on call_usermodehelper_exec() won't start any new
         * helpers, so it is sufficient if running_helpers turns out to
         * be zero at one point (it may be increased later, but that
         * doesn't matter).
         */
        retval = wait_event_timeout(running_helpers_waitq,
                                    atomic_read(&running_helpers) == 0,
                                    RUNNING_HELPERS_TIMEOUT);
        if (retval)
                return 0;

        __usermodehelper_set_disable_depth(UMH_ENABLED);
        return -EAGAIN;
}

static void helper_lock(void)
{
        atomic_inc(&running_helpers);
        smp_mb__after_atomic();
}

static void helper_unlock(void)
{
        if (atomic_dec_and_test(&running_helpers))
                wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is about
 * to be freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
                char **envp, gfp_t gfp_mask,
                int (*init)(struct subprocess_info *info, struct cred *new),
                void (*cleanup)(struct subprocess_info *info),
                void *data)
{
        struct subprocess_info *sub_info;

        sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
        if (!sub_info)
                goto out;

        INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
        sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
        sub_info->path = path;
#endif
        sub_info->argv = argv;
        sub_info->envp = envp;

        sub_info->cleanup = cleanup;
        sub_info->init = init;
        sub_info->data = data;
out:
        return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
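
/*
 * Illustrative sketch (not part of the original file): when argv is built
 * dynamically, the cleanup callback is a natural place to free it, since
 * call_usermodehelper_exec() frees the subprocess_info on every exit path.
 * The example_* names below are hypothetical.
 */
static void example_free_argv(struct subprocess_info *info)
{
        argv_free(info->argv);
}

static int example_run_cmdline(const char *cmdline)
{
        struct subprocess_info *info;
        char **argv = argv_split(GFP_KERNEL, cmdline, NULL);

        if (!argv)
                return -ENOMEM;

        info = call_usermodehelper_setup(argv[0], argv, NULL, GFP_KERNEL,
                                         NULL, example_free_argv, NULL);
        if (!info) {
                argv_free(argv);
                return -ENOMEM;
        }

        /* exec consumes info; cleanup frees argv on all paths */
        return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}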
struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
                int (*init)(struct subprocess_info *info, struct cred *new),
                void (*cleanup)(struct subprocess_info *info), void *data)
{
        struct subprocess_info *sub_info;
        struct umh_info *info = data;
        const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper";

        sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
        if (!sub_info)
                return NULL;

        sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL);
        if (!sub_info->argv) {
                kfree(sub_info);
                return NULL;
        }

        INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
        sub_info->path = "none";
        sub_info->file = file;
        sub_info->init = init;
        sub_info->cleanup = cleanup;
        sub_info->data = data;
        return sub_info;
}

static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct umh_info *umh_info = info->data;
        struct file *from_umh[2];
        struct file *to_umh[2];
        int err;

        /* create pipe to send data to umh */
        err = create_pipe_files(to_umh, 0);
        if (err)
                return err;
        err = replace_fd(0, to_umh[0], 0);
        fput(to_umh[0]);
        if (err < 0) {
                fput(to_umh[1]);
                return err;
        }

        /* create pipe to receive data from umh */
        err = create_pipe_files(from_umh, 0);
        if (err) {
                fput(to_umh[1]);
                replace_fd(0, NULL, 0);
                return err;
        }
        err = replace_fd(1, from_umh[1], 0);
        fput(from_umh[1]);
        if (err < 0) {
                fput(to_umh[1]);
                replace_fd(0, NULL, 0);
                fput(from_umh[0]);
                return err;
        }

        umh_info->pipe_to_umh = to_umh[1];
        umh_info->pipe_from_umh = from_umh[0];
        return 0;
}

static void umh_clean_and_save_pid(struct subprocess_info *info)
{
        struct umh_info *umh_info = info->data;

        argv_free(info->argv);
        umh_info->pid = info->pid;
}

/**
 * fork_usermode_blob - fork a blob of bytes as a usermode process
 * @data: a blob of bytes that can be do_execv-ed as a file
 * @len: length of the blob
 * @info: information about usermode process (shouldn't be NULL)
 *
 * If info->cmdline is set it will be used as command line for the
 * user process, else "usermodehelper" is used.
 *
 * Returns either negative error or zero which indicates success
 * in executing a blob of bytes as a usermode process. In such
 * case 'struct umh_info *info' is populated with two pipes
 * and a pid of the process. The caller is responsible for health
 * check of the user process, killing it via pid, and closing the
 * pipes when the user process is no longer needed.
 */
int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
{
        struct subprocess_info *sub_info;
        struct file *file;
        ssize_t written;
        loff_t pos = 0;
        int err;

        file = shmem_kernel_file_setup("", len, 0);
        if (IS_ERR(file))
                return PTR_ERR(file);

        written = kernel_write(file, data, len, &pos);
        if (written != len) {
                err = written;
                if (err >= 0)
                        err = -ENOMEM;
                goto out;
        }

        err = -ENOMEM;
        sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
                                                  umh_clean_and_save_pid, info);
        if (!sub_info)
                goto out;

        err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
out:
        fput(file);
        return err;
}
EXPORT_SYMBOL_GPL(fork_usermode_blob);
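
/*
 * Illustrative sketch (not part of the original file): a caller with an
 * executable image linked into the kernel hands it to fork_usermode_blob()
 * and then talks to the new process through the two pipes it gets back.
 * example_umh_info, my_blob_start and my_blob_end are hypothetical symbols.
 */
static struct umh_info example_umh_info;

static int example_start_blob(void)
{
        extern char my_blob_start[], my_blob_end[];	/* hypothetical image */
        int err;

        err = fork_usermode_blob(my_blob_start, my_blob_end - my_blob_start,
                                 &example_umh_info);
        if (err)
                return err;

        /*
         * From here on, write requests to example_umh_info.pipe_to_umh,
         * read replies from example_umh_info.pipe_from_umh, and kill the
         * helper via example_umh_info.pid once it is no longer needed.
         */
        return 0;
}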
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues.
 * (ie. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;

        if (!sub_info->path) {
                call_usermodehelper_freeinfo(sub_info);
                return -EINVAL;
        }
        helper_lock();
        if (usermodehelper_disabled) {
                retval = -EBUSY;
                goto out;
        }

        /*
         * If there is no binary for us to call, then just return and get out of
         * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
         * disable all call_usermodehelper() calls.
         */
        if (strlen(sub_info->path) == 0)
                goto out;

        /*
         * Set the completion pointer only if there is a waiter.
         * This makes it possible to use umh_complete to free
         * the data structure in case of UMH_NO_WAIT.
         */
        sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
        sub_info->wait = wait;

        queue_work(system_unbound_wq, &sub_info->work);
        if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
                goto unlock;

        if (wait & UMH_KILLABLE) {
                retval = wait_for_completion_killable(&done);
                if (!retval)
                        goto wait_done;

                /* umh_complete() will see NULL and free sub_info */
                if (xchg(&sub_info->complete, NULL))
                        goto unlock;
                /* fallthrough, umh_complete() was already called */
        }

        wait_for_completion(&done);
wait_done:
        retval = sub_info->retval;
out:
        call_usermodehelper_freeinfo(sub_info);
unlock:
        helper_unlock();
        return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() and
 * then call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
        struct subprocess_info *info;
        gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

        info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
                                         NULL, NULL, NULL);
        if (info == NULL)
                return -ENOMEM;

        return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
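
/*
 * Illustrative sketch (not part of the original file): the common one-shot
 * pattern builds a NULL-terminated argv/envp on the stack and lets
 * call_usermodehelper() do the setup and exec in one call. The helper path
 * and arguments below are hypothetical.
 */
static int example_run_helper(void)
{
        char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
        char *envp[] = { "HOME=/",
                         "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* UMH_WAIT_PROC: block until the helper exits and return its status */
        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}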
static int proc_cap_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
        kernel_cap_t new_cap;
        int err, i;

        if (write && (!capable(CAP_SETPCAP) ||
                      !capable(CAP_SYS_MODULE)))
                return -EPERM;

        /*
         * convert from the global kernel_cap_t to the ulong array to print to
         * userspace if this is a read.
         */
        spin_lock(&umh_sysctl_lock);
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
                if (table->data == CAP_BSET)
                        cap_array[i] = usermodehelper_bset.cap[i];
                else if (table->data == CAP_PI)
                        cap_array[i] = usermodehelper_inheritable.cap[i];
                else
                        BUG();
        }
        spin_unlock(&umh_sysctl_lock);

        t = *table;
        t.data = &cap_array;

        /*
         * actually read or write an array of ulongs from userspace. Remember
         * these are least significant 32 bits first
         */
        err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
        if (err < 0)
                return err;

        /*
         * convert from the sysctl array of ulongs to the kernel_cap_t
         * internal representation
         */
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
                new_cap.cap[i] = cap_array[i];

        /*
         * Drop everything not in the new_cap (but don't add things)
         */
        if (write) {
                spin_lock(&umh_sysctl_lock);
                if (table->data == CAP_BSET)
                        usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
                if (table->data == CAP_PI)
                        usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
                spin_unlock(&umh_sysctl_lock);
        }

        return 0;
}

struct ctl_table usermodehelper_table[] = {
        {
                .procname	= "bset",
                .data		= CAP_BSET,
                .maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode		= 0600,
                .proc_handler	= proc_cap_handler,
        },
        {
                .procname	= "inheritable",
                .data		= CAP_PI,
                .maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode		= 0600,
                .proc_handler	= proc_cap_handler,
        },
        { }
};