kobject_uevent.c

/*
 * kernel userspace event delivery
 *
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004 Novell, Inc. All rights reserved.
 * Copyright (C) 2004 IBM, Inc. All rights reserved.
 *
 * Licensed under the GNU GPL v2.
 *
 * Authors:
 *	Robert Love		<rml@novell.com>
 *	Kay Sievers		<kay.sievers@vrfy.org>
 *	Arjan van de Ven	<arjanv@redhat.com>
 *	Greg Kroah-Hartman	<greg@kroah.com>
 */

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <net/sock.h>
#include <net/net_namespace.h>


u64 uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
#ifdef CONFIG_NET
struct uevent_sock {
	struct list_head list;
	struct sock *sk;
};
static LIST_HEAD(uevent_sock_list);
#endif

/* This lock protects uevent_seqnum and uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);

/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
	[KOBJ_ADD] =		"add",
	[KOBJ_REMOVE] =		"remove",
	[KOBJ_CHANGE] =		"change",
	[KOBJ_MOVE] =		"move",
	[KOBJ_ONLINE] =		"online",
	[KOBJ_OFFLINE] =	"offline",
};

/**
 * kobject_action_type - translate action string to numeric type
 *
 * @buf: buffer containing the action string, newline is ignored
 * @count: length of buffer
 * @type: pointer to the location to store the action type
 *
 * Returns 0 if the action string was recognized.
 */
int kobject_action_type(const char *buf, size_t count,
			enum kobject_action *type)
{
	enum kobject_action action;
	int ret = -EINVAL;

	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
		count--;

	if (!count)
		goto out;

	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
		if (strncmp(kobject_actions[action], buf, count) != 0)
			continue;
		if (kobject_actions[action][count] != '\0')
			continue;
		*type = action;
		ret = 0;
		break;
	}
out:
	return ret;
}
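
/*
 * Usage sketch (editorial, not part of the original file): this helper is
 * typically used by a sysfs "uevent" store callback to translate a string
 * written from userspace (e.g. "add\n") into an enum kobject_action that
 * is then passed to kobject_uevent(). The function and variable names
 * below are hypothetical.
 *
 *	static ssize_t my_uevent_store(struct device *dev,
 *				       struct device_attribute *attr,
 *				       const char *buf, size_t count)
 *	{
 *		enum kobject_action action;
 *
 *		if (kobject_action_type(buf, count, &action) == 0)
 *			kobject_uevent(&dev->kobj, action);
 *		return count;
 *	}
 */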

#ifdef CONFIG_NET
static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	struct kobject *kobj = data, *ksobj;
	const struct kobj_ns_type_operations *ops;

	ops = kobj_ns_ops(kobj);
	if (!ops && kobj->kset) {
		ksobj = &kobj->kset->kobj;
		if (ksobj->parent != NULL)
			ops = kobj_ns_ops(ksobj->parent);
	}

	if (ops && ops->netlink_ns && kobj->ktype->namespace) {
		const void *sock_ns, *ns;
		ns = kobj->ktype->namespace(kobj);
		sock_ns = ops->netlink_ns(dsk);
		return sock_ns != ns;
	}

	return 0;
}
#endif
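
/*
 * Editorial sketch of the receiving side (not part of the original file):
 * a userspace listener such as udev subscribes to these broadcasts by
 * binding an AF_NETLINK socket of protocol NETLINK_KOBJECT_UEVENT to
 * multicast group 1; each datagram then carries "ACTION@DEVPATH" followed
 * by the NUL-separated key=value pairs built by kobject_uevent_env()
 * below.
 *
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = 1,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	recv(fd, buf, sizeof(buf), 0);
 */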

#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
	const struct kobj_ns_type_operations *ops;

	ops = kobj_ns_ops(kobj);
	if (ops) {
		const void *init_ns, *ns;
		ns = kobj->ktype->namespace(kobj);
		init_ns = ops->initial_ns();
		return ns != init_ns;
	}

	return 0;
}

static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
	int len;

	len = strlcpy(&env->buf[env->buflen], subsystem,
		      sizeof(env->buf) - env->buflen);
	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
		return -ENOMEM;
	}

	env->argv[0] = uevent_helper;
	env->argv[1] = &env->buf[env->buflen];
	env->argv[2] = NULL;

	env->buflen += len + 1;
	return 0;
}

static void cleanup_uevent_env(struct subprocess_info *info)
{
	kfree(info->data);
}
#endif
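
/*
 * Editorial sketch of what the userspace helper ends up seeing, assuming
 * the historical default CONFIG_UEVENT_HELPER_PATH of "/sbin/hotplug" and
 * a "block" subsystem; the concrete values are illustrative only:
 *
 *	argv[0] = "/sbin/hotplug"
 *	argv[1] = "block"
 *	environment:
 *		ACTION=add
 *		DEVPATH=<path returned by kobject_get_path()>
 *		SUBSYSTEM=block
 *		SEQNUM=1234
 *		HOME=/
 *		PATH=/sbin:/bin:/usr/sbin:/usr/bin
 *		(plus any keys added by the caller or the kset's uevent op)
 */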

/**
 * kobject_uevent_env - send an uevent with environmental data
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
 * @envp_ext: pointer to environmental data
 *
 * Returns 0 if kobject_uevent_env() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
		       char *envp_ext[])
{
	struct kobj_uevent_env *env;
	const char *action_string = kobject_actions[action];
	const char *devpath = NULL;
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	const struct kset_uevent_ops *uevent_ops;
	int i = 0;
	int retval = 0;
#ifdef CONFIG_NET
	struct uevent_sock *ue_sk;
#endif

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	/* search the kset we belong to */
	top_kobj = kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;

	if (!top_kobj->kset) {
		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
			 "without kset!\n", kobject_name(kobj), kobj,
			 __func__);
		return -EINVAL;
	}

	kset = top_kobj->kset;
	uevent_ops = kset->uevent_ops;

	/* skip the event, if uevent_suppress is set */
	if (kobj->uevent_suppress) {
		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
			 "caused the event to drop!\n",
			 kobject_name(kobj), kobj, __func__);
		return 0;
	}

	/* skip the event, if the filter returns zero. */
	if (uevent_ops && uevent_ops->filter)
		if (!uevent_ops->filter(kset, kobj)) {
			pr_debug("kobject: '%s' (%p): %s: filter function "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
			return 0;
		}

	/* originating subsystem */
	if (uevent_ops && uevent_ops->name)
		subsystem = uevent_ops->name(kset, kobj);
	else
		subsystem = kobject_name(&kset->kobj);
	if (!subsystem) {
		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
			 "event to drop!\n", kobject_name(kobj), kobj,
			 __func__);
		return 0;
	}

	/* environment buffer */
	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* complete object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		retval = -ENOENT;
		goto exit;
	}

	/* default keys */
	retval = add_uevent_var(env, "ACTION=%s", action_string);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
	if (retval)
		goto exit;

	/* keys passed in from the caller */
	if (envp_ext) {
		for (i = 0; envp_ext[i]; i++) {
			retval = add_uevent_var(env, "%s", envp_ext[i]);
			if (retval)
				goto exit;
		}
	}

	/* let the kset specific function add its stuff */
	if (uevent_ops && uevent_ops->uevent) {
		retval = uevent_ops->uevent(kset, kobj, env);
		if (retval) {
			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
				 "%d\n", kobject_name(kobj), kobj,
				 __func__, retval);
			goto exit;
		}
	}

	/*
	 * Mark "add" and "remove" events in the object to ensure proper
	 * events to userspace during automatic cleanup. If the object did
	 * send an "add" event, a "remove" event will be generated
	 * automatically by the core, if not already done by the caller.
	 */
	if (action == KOBJ_ADD)
		kobj->state_add_uevent_sent = 1;
	else if (action == KOBJ_REMOVE)
		kobj->state_remove_uevent_sent = 1;

	mutex_lock(&uevent_sock_mutex);
	/* we will send an event, so request a new sequence number */
	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
	if (retval) {
		mutex_unlock(&uevent_sock_mutex);
		goto exit;
	}

#if defined(CONFIG_NET)
	/* send netlink message */
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		struct sock *uevent_sock = ue_sk->sk;
		struct sk_buff *skb;
		size_t len;

		if (!netlink_has_listeners(uevent_sock, 1))
			continue;

		/* allocate message with the maximum possible size */
		len = strlen(action_string) + strlen(devpath) + 2;
		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
		if (skb) {
			char *scratch;

			/* add header */
			scratch = skb_put(skb, len);
			sprintf(scratch, "%s@%s", action_string, devpath);

			/* copy keys to our continuous event payload buffer */
			for (i = 0; i < env->envp_idx; i++) {
				len = strlen(env->envp[i]) + 1;
				scratch = skb_put(skb, len);
				strcpy(scratch, env->envp[i]);
			}

			NETLINK_CB(skb).dst_group = 1;
			retval = netlink_broadcast_filtered(uevent_sock, skb,
							    0, 1, GFP_KERNEL,
							    kobj_bcast_filter,
							    kobj);
			/* ENOBUFS should be handled in userspace */
			if (retval == -ENOBUFS || retval == -ESRCH)
				retval = 0;
		} else
			retval = -ENOMEM;
	}
#endif
	mutex_unlock(&uevent_sock_mutex);

#ifdef CONFIG_UEVENT_HELPER
	/* call uevent_helper, usually only enabled during early boot */
	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
		struct subprocess_info *info;

		retval = add_uevent_var(env, "HOME=/");
		if (retval)
			goto exit;
		retval = add_uevent_var(env,
					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
		if (retval)
			goto exit;
		retval = init_uevent_argv(env, subsystem);
		if (retval)
			goto exit;

		retval = -ENOMEM;
		info = call_usermodehelper_setup(env->argv[0], env->argv,
						 env->envp, GFP_KERNEL,
						 NULL, cleanup_uevent_env, env);
		if (info) {
			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
			env = NULL;	/* freed by cleanup_uevent_env */
		}
	}
#endif

exit:
	kfree(devpath);
	kfree(env);
	return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
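
/*
 * Usage sketch (editorial, not part of the original file): a driver that
 * wants to tell userspace about a state change can pass extra key=value
 * pairs via envp_ext; the names "my_dev" and "MY_STATE" are hypothetical.
 *
 *	static void my_notify_ready(struct device *my_dev)
 *	{
 *		char *envp[] = { "MY_STATE=ready", NULL };
 *
 *		kobject_uevent_env(&my_dev->kobj, KOBJ_CHANGE, envp);
 *	}
 */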

/**
 * kobject_uevent - notify userspace by sending an uevent
 *
 * @action: action that is happening
 * @kobj: struct kobject that the action is happening to
 *
 * Returns 0 if kobject_uevent() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
	return kobject_uevent_env(kobj, action, NULL);
}
EXPORT_SYMBOL_GPL(kobject_uevent);

/**
 * add_uevent_var - add key value string to the environment buffer
 * @env: environment buffer structure
 * @format: printf format for the key=value pair
 *
 * Returns 0 if environment variable was added successfully or -ENOMEM
 * if no space was available.
 */
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
{
	va_list args;
	int len;

	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
		WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
		return -ENOMEM;
	}

	va_start(args, format);
	len = vsnprintf(&env->buf[env->buflen],
			sizeof(env->buf) - env->buflen,
			format, args);
	va_end(args);

	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
		return -ENOMEM;
	}

	env->envp[env->envp_idx++] = &env->buf[env->buflen];
	env->buflen += len + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(add_uevent_var);
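
/*
 * Usage sketch (editorial, not part of the original file): a kset's
 * uevent callback typically uses add_uevent_var() to append its own
 * keys before the event is broadcast; the names below are hypothetical.
 *
 *	static int my_kset_uevent(struct kset *kset, struct kobject *kobj,
 *				  struct kobj_uevent_env *env)
 *	{
 *		return add_uevent_var(env, "MY_KEY=%s", kobject_name(kobj));
 *	}
 *
 *	static const struct kset_uevent_ops my_uevent_ops = {
 *		.uevent	= my_kset_uevent,
 *	};
 */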

#if defined(CONFIG_NET)
static int uevent_net_init(struct net *net)
{
	struct uevent_sock *ue_sk;
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.flags	= NL_CFG_F_NONROOT_RECV,
	};

	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
	if (!ue_sk)
		return -ENOMEM;

	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
	if (!ue_sk->sk) {
		printk(KERN_ERR
		       "kobject_uevent: unable to create netlink socket!\n");
		kfree(ue_sk);
		return -ENODEV;
	}
	mutex_lock(&uevent_sock_mutex);
	list_add_tail(&ue_sk->list, &uevent_sock_list);
	mutex_unlock(&uevent_sock_mutex);
	return 0;
}

static void uevent_net_exit(struct net *net)
{
	struct uevent_sock *ue_sk;

	mutex_lock(&uevent_sock_mutex);
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		if (sock_net(ue_sk->sk) == net)
			goto found;
	}
	mutex_unlock(&uevent_sock_mutex);
	return;

found:
	list_del(&ue_sk->list);
	mutex_unlock(&uevent_sock_mutex);

	netlink_kernel_release(ue_sk->sk);
	kfree(ue_sk);
}

static struct pernet_operations uevent_net_ops = {
	.init	= uevent_net_init,
	.exit	= uevent_net_exit,
};

static int __init kobject_uevent_init(void)
{
	return register_pernet_subsys(&uevent_net_ops);
}

postcore_initcall(kobject_uevent_init);
#endif