audit_tree.c

// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;
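
/*
 * Allocate an audit_tree holding one reference; @s is copied into the
 * flexible pathname[] array at the end of the structure.
 */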
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
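
/*
 * Freeing a chunk drops the tree reference held by each live owner slot.
 * audit_put_chunk() releases one of the .refs references; the final one
 * frees the chunk itself.
 */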
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}
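
/*
 * Allocate a chunk with room for @count owner slots.  The chunk starts
 * with a single .refs reference and an initialized (but not yet attached)
 * fsnotify mark.
 */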
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	/*
	 * We have a reference to the mark so it should be attached to a
	 * connector.
	 */
	if (WARN_ON_ONCE(!chunk->mark.connector))
		return 0;
	return (unsigned long)chunk->mark.connector->obj;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;

	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */
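
/*
 * owners[] is a flexible array, so the node's position within it (kept in
 * the low bits of node.index) is enough to step back to owners[0] and from
 * there to the containing chunk via container_of().
 */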
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
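
/*
 * Remove node @p's tree from its chunk.  The chunk is replaced by a copy
 * with one owner slot fewer (or destroyed outright if @p was the last
 * owner).  Called with hash_lock held; the lock is dropped while the
 * replacement is built and re-taken before returning.
 */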
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
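
/*
 * Tag an inode that does not carry a chunk yet: allocate a single-owner
 * chunk, attach its mark to the inode and link it to @tree.
 */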
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_inode_mark(entry, inode, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
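
/*
 * Add @tree as an owner of @inode's chunk.  If the inode already carries a
 * chunk, it is replaced by a copy with one more owner slot; otherwise a
 * fresh chunk is created via create_chunk().  The new owner is marked with
 * the 'will prune' bit so an aborted operation can be reverted.
 */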
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
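
/*
 * Detach and free every rule attached to @tree, logging the removal of
 * each fully set up rule.  Called with audit_filter_mutex held.
 */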
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
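
/*
 * Untag every chunk whose node still carries the 'will prune' bit.  Marked
 * nodes are first moved to the front of tree->chunks so the loop can stop
 * at the first unmarked one; if the tree ends up without a root, the whole
 * tree is killed.
 */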
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
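
/*
 * Walk all trees on tree_list and drop the chunks that are no longer
 * reachable from the tree's pathname: every chunk is provisionally marked
 * 'will prune', the mark is cleared only for chunks found under one of the
 * tree's current mounts, and trim_marked() removes the rest.
 */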
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
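
/*
 * Turn a rule into a tree rule: only exit-list rules comparing an absolute
 * path for equality qualify, and the rule must not already have an inode,
 * watch or tree attached.
 */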
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}
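
/* Start the prune thread on first use; a no-op if it is already running. */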
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
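
/*
 * Attach @rule to an existing tree with the same pathname, or insert its
 * freshly allocated tree into tree_list and tag every mount under that
 * path.  audit_filter_mutex is dropped and re-taken around the mount walk.
 */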
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
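
/*
 * For every tree whose pathname lies under @old, tag all mounts collected
 * at @new as well.  On failure the provisional tags are trimmed again;
 * returns 0 on success or an error code.
 */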
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
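
/*
 * Called via the fsnotify ->freeing_mark() callback when a chunk's mark is
 * being destroyed: take the chunk out of the hash and queue every tree
 * rooted at it for killing, either on the postponed list of the current
 * syscall or via the prune thread.
 */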
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
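
/*
 * We do not care about the events themselves (the mark mask is just
 * FS_IN_IGNORED); the marks exist only for their lifetime callbacks, so
 * ->handle_event() is a no-op.
 */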
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);