audit_tree.c

#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Each rule holds a pointer to its struct audit_tree.
 * Rules have a struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list             hash_lock
 * tree.rules anchors rule.rlist                       audit_filter_mutex
 * chunk.trees anchors tree.same_root                  hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                    RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index lets us get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * the flag makes a difference there.
 */

static struct fsnotify_group *audit_tree_group;
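
/*
 * Allocate a tree for the given path.  The tree starts with a single
 * reference (for the rule that is being created) and empty lists.
 */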
static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}
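
/*
 * Tree refcounting helpers; the final put frees the tree via kfree_rcu(),
 * so it stays valid for any concurrent RCU readers.
 */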
static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}
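
/*
 * Drop the tree references held by the chunk's owner slots and free
 * the chunk itself.
 */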
static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}
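
/*
 * Allocate a chunk with room for @count owner slots.  The embedded
 * fsnotify mark is initialized but not yet attached to any inode;
 * .refs starts at 1, contributed by the mark (see the comment at the
 * top of this file).
 */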
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
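
/* hash by inode address, using its middle bits (cf. the comment above) */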
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->inode)
                return;
        list = chunk_hash(entry->inode);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return true;
        return false;
}

/* tagging and untagging inodes with trees */
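/*
 * Recover the containing chunk from an owner slot: the low bits of
 * node->index give the slot's position in chunk->owners[], so stepping
 * back that many slots lands on owners[0].
 */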
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
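
/*
 * Remove one owner from a chunk.  Since chunks are replaced wholesale
 * on tagging/untagging, this allocates a replacement with one slot
 * fewer, copies the surviving owners across, and swaps it into the
 * hash; a one-owner chunk is simply torn down.  Called with hash_lock
 * held; drops and retakes it around the blocking parts.
 */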
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->inode) {
                spin_unlock(&entry->lock);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry, audit_tree_group);
                goto out;
        }

        if (!new)
                goto Fallback;

        if (fsnotify_add_mark(&new->mark,
                              entry->group, entry->inode, NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry, audit_tree_group);
        fsnotify_put_mark(&new->mark);  /* drop initial reference */
        goto out;

Fallback:
        /* we could not allocate a replacement; do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}
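
/*
 * Attach a brand-new single-owner chunk to an inode that has no chunk
 * yet.  Returns 0 on success, including the benign race where the tree
 * died while we were setting up.
 */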
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry, audit_tree_group);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_put_mark(entry);       /* drop initial reference */
        return 0;
}

/* the first tagged inode becomes root of tree */
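/*
 * Add @tree as an owner of the chunk on @inode, replacing the existing
 * chunk (if any) with a copy that has room for one more owner.
 */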
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        if (fsnotify_add_mark(chunk_entry,
                              old_entry->group, old_entry->inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /*
         * Even though we hold old_entry->lock, this is safe since
         * chunk_entry->lock could never have been grabbed before.
         */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_destroy_mark(chunk_entry, audit_tree_group);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry, audit_tree_group);
        fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry);   /* pair to fsnotify_find_inode_mark */
        return 0;
}
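
/* log a CONFIG_CHANGE record for a rule that is being auto-removed */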
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
        struct audit_buffer *ab;

        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "op=remove_rule");
        audit_log_format(ab, " dir=");
        audit_log_untrustedstring(ab, rule->tree->pathname);
        audit_log_key(ab, rule->filterkey);
        audit_log_format(ab, " list=%d res=1", rule->listnr);
        audit_log_end(ab);
}
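
/*
 * Detach and free every rule attached to @tree, logging the removal of
 * each fully-initialized one.  Caller holds audit_filter_mutex.
 */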
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        audit_tree_log_remove_rule(rule);
                        if (entry->rule.exe)
                                audit_remove_mark(entry->rule.exe);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;

        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder: move the marked ("will prune") chunks to the front */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}
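
/* iterate_mounts() callback: does this mount's root match the inode? */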
static int compare_root(struct vfsmount *mnt, void *arg)
{
        return d_backing_inode(mnt->mnt_root) == arg;
}
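
/*
 * Walk all trees and drop the chunks that are no longer reachable under
 * the tree's path (e.g. because mounts changed).  A dummy list cursor
 * keeps our place in tree_list while audit_filter_mutex is dropped.
 */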
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}
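
/*
 * Validate a prospective tree rule (absolute path, exit filter,
 * equality match, no competing inode/watch/tree fields) and attach a
 * fresh tree to it.
 */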
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}
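
/* iterate_mounts() callback: tag one mount's root inode with the tree */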
static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        for (;;) {
                if (list_empty(&prune_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                }

                mutex_lock(&audit_cmd_mutex);
                mutex_lock(&audit_filter_mutex);

                while (!list_empty(&prune_list)) {
                        struct audit_tree *victim;

                        victim = list_entry(prune_list.next,
                                        struct audit_tree, list);
                        list_del_init(&victim->list);

                        mutex_unlock(&audit_filter_mutex);

                        prune_one(victim);

                        mutex_lock(&audit_filter_mutex);
                }

                mutex_unlock(&audit_filter_mutex);
                mutex_unlock(&audit_cmd_mutex);
        }
        return 0;
}
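
/* start the prune thread on first use; subsequent calls are no-ops */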
static int audit_launch_prune(void)
{
        if (prune_thread)
                return 0;
        prune_thread = kthread_run(prune_tree_thread, NULL,
                                "audit_prune_tree");
        if (IS_ERR(prune_thread)) {
                pr_err("cannot start thread audit_prune_tree");
                prune_thread = NULL;
                return -ENOMEM;
        }
        return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        rule->tree = NULL;
        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        if (unlikely(!prune_thread)) {
                err = audit_launch_prune();
                if (err)
                        goto Err;
        }

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}
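
/*
 * Used for AUDIT_MAKE_EQUIV: for every tree whose path covers @old,
 * also tag the mounts collected at @new.  Returns 0 or the first
 * tagging error.
 */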
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }

        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

static void audit_schedule_prune(void)
{
        wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
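
/*
 * The inode under this chunk is going away: detach the chunk and kill
 * off every tree rooted here.  Trees are either queued for the prune
 * thread or, if a syscall is in flight, put on the list returned by
 * audit_killed_trees() to be finished synchronously at syscall exit.
 */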
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
        if (need_prune)
                audit_schedule_prune();
}
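
/*
 * We never act on the events themselves; the mark exists only so that
 * freeing_mark tells us when the watched inode is being evicted.
 */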
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct inode *to_tell,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   u32 mask, void *data, int data_type,
                                   const unsigned char *file_name, u32 cookie)
{
        return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);

        /*
         * We are guaranteed to have at least one reference to the mark from
         * either the inode or the caller of fsnotify_destroy_mark().
         */
        BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);