label.c 50 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124
  1. /*
  2. * AppArmor security module
  3. *
  4. * This file contains AppArmor label definitions
  5. *
  6. * Copyright 2017 Canonical Ltd.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License as
  10. * published by the Free Software Foundation, version 2 of the
  11. * License.
  12. */
  13. #include <linux/audit.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/sort.h>
  16. #include "include/apparmor.h"
  17. #include "include/context.h"
  18. #include "include/label.h"
  19. #include "include/policy.h"
  20. #include "include/secid.h"
  21. /*
  22. * the aa_label represents the set of profiles confining an object
  23. *
  24. * Labels maintain a reference count to the set of pointers they reference
  25. * Labels are ref counted by
  26. * tasks and object via the security field/security context off the field
  27. * code - will take a ref count on a label if it needs the label
  28. * beyond what is possible with an rcu_read_lock.
  29. * profiles - each profile is a label
  30. * secids - a pinned secid will keep a refcount of the label it is
  31. * referencing
  32. * objects - inode, files, sockets, ...
  33. *
  34. * Labels are not ref counted by the label set, so they maybe removed and
  35. * freed when no longer in use.
  36. *
  37. */
  38. #define PROXY_POISON 97
  39. #define LABEL_POISON 100
  40. static void free_proxy(struct aa_proxy *proxy)
  41. {
  42. if (proxy) {
  43. /* p->label will not updated any more as p is dead */
  44. aa_put_label(rcu_dereference_protected(proxy->label, true));
  45. memset(proxy, 0, sizeof(*proxy));
  46. RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON);
  47. kfree(proxy);
  48. }
  49. }
  50. void aa_proxy_kref(struct kref *kref)
  51. {
  52. struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);
  53. free_proxy(proxy);
  54. }
  55. struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
  56. {
  57. struct aa_proxy *new;
  58. new = kzalloc(sizeof(struct aa_proxy), gfp);
  59. if (new) {
  60. kref_init(&new->count);
  61. rcu_assign_pointer(new->label, aa_get_label(label));
  62. }
  63. return new;
  64. }
  65. /* requires profile list write lock held */
  66. void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
  67. {
  68. struct aa_label *tmp;
  69. AA_BUG(!orig);
  70. AA_BUG(!new);
  71. lockdep_assert_held_exclusive(&labels_set(orig)->lock);
  72. tmp = rcu_dereference_protected(orig->proxy->label,
  73. &labels_ns(orig)->lock);
  74. rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
  75. orig->flags |= FLAG_STALE;
  76. aa_put_label(tmp);
  77. }
  78. static void __proxy_share(struct aa_label *old, struct aa_label *new)
  79. {
  80. struct aa_proxy *proxy = new->proxy;
  81. new->proxy = aa_get_proxy(old->proxy);
  82. __aa_proxy_redirect(old, new);
  83. aa_put_proxy(proxy);
  84. }
  85. /**
  86. * ns_cmp - compare ns for label set ordering
  87. * @a: ns to compare (NOT NULL)
  88. * @b: ns to compare (NOT NULL)
  89. *
  90. * Returns: <0 if a < b
  91. * ==0 if a == b
  92. * >0 if a > b
  93. */
  94. static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
  95. {
  96. int res;
  97. AA_BUG(!a);
  98. AA_BUG(!b);
  99. AA_BUG(!a->base.hname);
  100. AA_BUG(!b->base.hname);
  101. if (a == b)
  102. return 0;
  103. res = a->level - b->level;
  104. if (res)
  105. return res;
  106. return strcmp(a->base.hname, b->base.hname);
  107. }
  108. /**
  109. * profile_cmp - profile comparision for set ordering
  110. * @a: profile to compare (NOT NULL)
  111. * @b: profile to compare (NOT NULL)
  112. *
  113. * Returns: <0 if a < b
  114. * ==0 if a == b
  115. * >0 if a > b
  116. */
  117. static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
  118. {
  119. int res;
  120. AA_BUG(!a);
  121. AA_BUG(!b);
  122. AA_BUG(!a->ns);
  123. AA_BUG(!b->ns);
  124. AA_BUG(!a->base.hname);
  125. AA_BUG(!b->base.hname);
  126. if (a == b || a->base.hname == b->base.hname)
  127. return 0;
  128. res = ns_cmp(a->ns, b->ns);
  129. if (res)
  130. return res;
  131. return strcmp(a->base.hname, b->base.hname);
  132. }
  133. /**
  134. * vec_cmp - label comparision for set ordering
  135. * @a: label to compare (NOT NULL)
  136. * @vec: vector of profiles to compare (NOT NULL)
  137. * @n: length of @vec
  138. *
  139. * Returns: <0 if a < vec
  140. * ==0 if a == vec
  141. * >0 if a > vec
  142. */
  143. static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
  144. {
  145. int i;
  146. AA_BUG(!a);
  147. AA_BUG(!*a);
  148. AA_BUG(!b);
  149. AA_BUG(!*b);
  150. AA_BUG(an <= 0);
  151. AA_BUG(bn <= 0);
  152. for (i = 0; i < an && i < bn; i++) {
  153. int res = profile_cmp(a[i], b[i]);
  154. if (res != 0)
  155. return res;
  156. }
  157. return an - bn;
  158. }
  159. static bool vec_is_stale(struct aa_profile **vec, int n)
  160. {
  161. int i;
  162. AA_BUG(!vec);
  163. for (i = 0; i < n; i++) {
  164. if (profile_is_stale(vec[i]))
  165. return true;
  166. }
  167. return false;
  168. }
  169. static bool vec_unconfined(struct aa_profile **vec, int n)
  170. {
  171. int i;
  172. AA_BUG(!vec);
  173. for (i = 0; i < n; i++) {
  174. if (!profile_unconfined(vec[i]))
  175. return false;
  176. }
  177. return true;
  178. }
  179. static int sort_cmp(const void *a, const void *b)
  180. {
  181. return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
  182. }
  183. /*
  184. * assumes vec is sorted
  185. * Assumes @vec has null terminator at vec[n], and will null terminate
  186. * vec[n - dups]
  187. */
  188. static inline int unique(struct aa_profile **vec, int n)
  189. {
  190. int i, pos, dups = 0;
  191. AA_BUG(n < 1);
  192. AA_BUG(!vec);
  193. pos = 0;
  194. for (i = 1; i < n; i++) {
  195. int res = profile_cmp(vec[pos], vec[i]);
  196. AA_BUG(res > 0, "vec not sorted");
  197. if (res == 0) {
  198. /* drop duplicate */
  199. aa_put_profile(vec[i]);
  200. dups++;
  201. continue;
  202. }
  203. pos++;
  204. if (dups)
  205. vec[pos] = vec[i];
  206. }
  207. AA_BUG(dups < 0);
  208. return dups;
  209. }
  210. /**
  211. * aa_vec_unique - canonical sort and unique a list of profiles
  212. * @n: number of refcounted profiles in the list (@n > 0)
  213. * @vec: list of profiles to sort and merge
  214. *
  215. * Returns: the number of duplicates eliminated == references put
  216. *
  217. * If @flags & VEC_FLAG_TERMINATE @vec has null terminator at vec[n], and will
  218. * null terminate vec[n - dups]
  219. */
  220. int aa_vec_unique(struct aa_profile **vec, int n, int flags)
  221. {
  222. int i, dups = 0;
  223. AA_BUG(n < 1);
  224. AA_BUG(!vec);
  225. /* vecs are usually small and inorder, have a fallback for larger */
  226. if (n > 8) {
  227. sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
  228. dups = unique(vec, n);
  229. goto out;
  230. }
  231. /* insertion sort + unique in one */
  232. for (i = 1; i < n; i++) {
  233. struct aa_profile *tmp = vec[i];
  234. int pos, j;
  235. for (pos = i - 1 - dups; pos >= 0; pos--) {
  236. int res = profile_cmp(vec[pos], tmp);
  237. if (res == 0) {
  238. /* drop duplicate entry */
  239. aa_put_profile(tmp);
  240. dups++;
  241. goto continue_outer;
  242. } else if (res < 0)
  243. break;
  244. }
  245. /* pos is at entry < tmp, or index -1. Set to insert pos */
  246. pos++;
  247. for (j = i - dups; j > pos; j--)
  248. vec[j] = vec[j - 1];
  249. vec[pos] = tmp;
  250. continue_outer:
  251. ;
  252. }
  253. AA_BUG(dups < 0);
  254. out:
  255. if (flags & VEC_FLAG_TERMINATE)
  256. vec[n - dups] = NULL;
  257. return dups;
  258. }
  259. static void label_destroy(struct aa_label *label)
  260. {
  261. struct aa_label *tmp;
  262. AA_BUG(!label);
  263. if (!label_isprofile(label)) {
  264. struct aa_profile *profile;
  265. struct label_it i;
  266. aa_put_str(label->hname);
  267. label_for_each(i, label, profile) {
  268. aa_put_profile(profile);
  269. label->vec[i.i] = (struct aa_profile *)
  270. (LABEL_POISON + (long) i.i);
  271. }
  272. }
  273. if (rcu_dereference_protected(label->proxy->label, true) == label)
  274. rcu_assign_pointer(label->proxy->label, NULL);
  275. aa_free_secid(label->secid);
  276. tmp = rcu_dereference_protected(label->proxy->label, true);
  277. if (tmp == label)
  278. rcu_assign_pointer(label->proxy->label, NULL);
  279. aa_put_proxy(label->proxy);
  280. label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
  281. }
  282. void aa_label_free(struct aa_label *label)
  283. {
  284. if (!label)
  285. return;
  286. label_destroy(label);
  287. kfree(label);
  288. }
  289. static void label_free_switch(struct aa_label *label)
  290. {
  291. if (label->flags & FLAG_NS_COUNT)
  292. aa_free_ns(labels_ns(label));
  293. else if (label_isprofile(label))
  294. aa_free_profile(labels_profile(label));
  295. else
  296. aa_label_free(label);
  297. }
  298. static void label_free_rcu(struct rcu_head *head)
  299. {
  300. struct aa_label *label = container_of(head, struct aa_label, rcu);
  301. if (label->flags & FLAG_IN_TREE)
  302. (void) aa_label_remove(label);
  303. label_free_switch(label);
  304. }
  305. void aa_label_kref(struct kref *kref)
  306. {
  307. struct aa_label *label = container_of(kref, struct aa_label, count);
  308. struct aa_ns *ns = labels_ns(label);
  309. if (!ns) {
  310. /* never live, no rcu callback needed, just using the fn */
  311. label_free_switch(label);
  312. return;
  313. }
  314. /* TODO: update labels_profile macro so it works here */
  315. AA_BUG(label_isprofile(label) &&
  316. on_list_rcu(&label->vec[0]->base.profiles));
  317. AA_BUG(label_isprofile(label) &&
  318. on_list_rcu(&label->vec[0]->base.list));
  319. /* TODO: if compound label and not stale add to reclaim cache */
  320. call_rcu(&label->rcu, label_free_rcu);
  321. }
  322. static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
  323. {
  324. if (label != new)
  325. /* need to free directly to break circular ref with proxy */
  326. aa_label_free(new);
  327. else
  328. aa_put_label(new);
  329. }
  330. bool aa_label_init(struct aa_label *label, int size)
  331. {
  332. AA_BUG(!label);
  333. AA_BUG(size < 1);
  334. label->secid = aa_alloc_secid();
  335. if (label->secid == AA_SECID_INVALID)
  336. return false;
  337. label->size = size; /* doesn't include null */
  338. label->vec[size] = NULL; /* null terminate */
  339. kref_init(&label->count);
  340. RB_CLEAR_NODE(&label->node);
  341. return true;
  342. }
  343. /**
  344. * aa_label_alloc - allocate a label with a profile vector of @size length
  345. * @size: size of profile vector in the label
  346. * @proxy: proxy to use OR null if to allocate a new one
  347. * @gfp: memory allocation type
  348. *
  349. * Returns: new label
  350. * else NULL if failed
  351. */
  352. struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
  353. {
  354. struct aa_label *new;
  355. AA_BUG(size < 1);
  356. /* + 1 for null terminator entry on vec */
  357. new = kzalloc(sizeof(*new) + sizeof(struct aa_profile *) * (size + 1),
  358. gfp);
  359. AA_DEBUG("%s (%p)\n", __func__, new);
  360. if (!new)
  361. goto fail;
  362. if (!aa_label_init(new, size))
  363. goto fail;
  364. if (!proxy) {
  365. proxy = aa_alloc_proxy(new, gfp);
  366. if (!proxy)
  367. goto fail;
  368. } else
  369. aa_get_proxy(proxy);
  370. /* just set new's proxy, don't redirect proxy here if it was passed in*/
  371. new->proxy = proxy;
  372. return new;
  373. fail:
  374. kfree(new);
  375. return NULL;
  376. }
  377. /**
  378. * label_cmp - label comparision for set ordering
  379. * @a: label to compare (NOT NULL)
  380. * @b: label to compare (NOT NULL)
  381. *
  382. * Returns: <0 if a < b
  383. * ==0 if a == b
  384. * >0 if a > b
  385. */
  386. static int label_cmp(struct aa_label *a, struct aa_label *b)
  387. {
  388. AA_BUG(!b);
  389. if (a == b)
  390. return 0;
  391. return vec_cmp(a->vec, a->size, b->vec, b->size);
  392. }
  393. /* helper fn for label_for_each_confined */
  394. int aa_label_next_confined(struct aa_label *label, int i)
  395. {
  396. AA_BUG(!label);
  397. AA_BUG(i < 0);
  398. for (; i < label->size; i++) {
  399. if (!profile_unconfined(label->vec[i]))
  400. return i;
  401. }
  402. return i;
  403. }
  404. /**
  405. * aa_label_next_not_in_set - return the next profile of @sub not in @set
  406. * @I: label iterator
  407. * @set: label to test against
  408. * @sub: label to if is subset of @set
  409. *
  410. * Returns: profile in @sub that is not in @set, with iterator set pos after
  411. * else NULL if @sub is a subset of @set
  412. */
  413. struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
  414. struct aa_label *set,
  415. struct aa_label *sub)
  416. {
  417. AA_BUG(!set);
  418. AA_BUG(!I);
  419. AA_BUG(I->i < 0);
  420. AA_BUG(I->i > set->size);
  421. AA_BUG(!sub);
  422. AA_BUG(I->j < 0);
  423. AA_BUG(I->j > sub->size);
  424. while (I->j < sub->size && I->i < set->size) {
  425. int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);
  426. if (res == 0) {
  427. (I->j)++;
  428. (I->i)++;
  429. } else if (res > 0)
  430. (I->i)++;
  431. else
  432. return sub->vec[(I->j)++];
  433. }
  434. if (I->j < sub->size)
  435. return sub->vec[(I->j)++];
  436. return NULL;
  437. }
  438. /**
  439. * aa_label_is_subset - test if @sub is a subset of @set
  440. * @set: label to test against
  441. * @sub: label to test if is subset of @set
  442. *
  443. * Returns: true if @sub is subset of @set
  444. * else false
  445. */
  446. bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
  447. {
  448. struct label_it i = { };
  449. AA_BUG(!set);
  450. AA_BUG(!sub);
  451. if (sub == set)
  452. return true;
  453. return __aa_label_next_not_in_set(&i, set, sub) == NULL;
  454. }
  455. /**
  456. * __label_remove - remove @label from the label set
  457. * @l: label to remove
  458. * @new: label to redirect to
  459. *
  460. * Requires: labels_set(@label)->lock write_lock
  461. * Returns: true if the label was in the tree and removed
  462. */
  463. static bool __label_remove(struct aa_label *label, struct aa_label *new)
  464. {
  465. struct aa_labelset *ls = labels_set(label);
  466. AA_BUG(!ls);
  467. AA_BUG(!label);
  468. lockdep_assert_held_exclusive(&ls->lock);
  469. if (new)
  470. __aa_proxy_redirect(label, new);
  471. if (!label_is_stale(label))
  472. __label_make_stale(label);
  473. if (label->flags & FLAG_IN_TREE) {
  474. rb_erase(&label->node, &ls->root);
  475. label->flags &= ~FLAG_IN_TREE;
  476. return true;
  477. }
  478. return false;
  479. }
  480. /**
  481. * __label_replace - replace @old with @new in label set
  482. * @old: label to remove from label set
  483. * @new: label to replace @old with
  484. *
  485. * Requires: labels_set(@old)->lock write_lock
  486. * valid ref count be held on @new
  487. * Returns: true if @old was in set and replaced by @new
  488. *
  489. * Note: current implementation requires label set be order in such a way
  490. * that @new directly replaces @old position in the set (ie.
  491. * using pointer comparison of the label address would not work)
  492. */
  493. static bool __label_replace(struct aa_label *old, struct aa_label *new)
  494. {
  495. struct aa_labelset *ls = labels_set(old);
  496. AA_BUG(!ls);
  497. AA_BUG(!old);
  498. AA_BUG(!new);
  499. lockdep_assert_held_exclusive(&ls->lock);
  500. AA_BUG(new->flags & FLAG_IN_TREE);
  501. if (!label_is_stale(old))
  502. __label_make_stale(old);
  503. if (old->flags & FLAG_IN_TREE) {
  504. rb_replace_node(&old->node, &new->node, &ls->root);
  505. old->flags &= ~FLAG_IN_TREE;
  506. new->flags |= FLAG_IN_TREE;
  507. return true;
  508. }
  509. return false;
  510. }
  511. /**
  512. * __label_insert - attempt to insert @l into a label set
  513. * @ls: set of labels to insert @l into (NOT NULL)
  514. * @label: new label to insert (NOT NULL)
  515. * @replace: whether insertion should replace existing entry that is not stale
  516. *
  517. * Requires: @ls->lock
  518. * caller to hold a valid ref on l
  519. * if @replace is true l has a preallocated proxy associated
  520. * Returns: @l if successful in inserting @l - with additional refcount
  521. * else ref counted equivalent label that is already in the set,
  522. * the else condition only happens if @replace is false
  523. */
  524. static struct aa_label *__label_insert(struct aa_labelset *ls,
  525. struct aa_label *label, bool replace)
  526. {
  527. struct rb_node **new, *parent = NULL;
  528. AA_BUG(!ls);
  529. AA_BUG(!label);
  530. AA_BUG(labels_set(label) != ls);
  531. lockdep_assert_held_exclusive(&ls->lock);
  532. AA_BUG(label->flags & FLAG_IN_TREE);
  533. /* Figure out where to put new node */
  534. new = &ls->root.rb_node;
  535. while (*new) {
  536. struct aa_label *this = rb_entry(*new, struct aa_label, node);
  537. int result = label_cmp(label, this);
  538. parent = *new;
  539. if (result == 0) {
  540. /* !__aa_get_label means queued for destruction,
  541. * so replace in place, however the label has
  542. * died before the replacement so do not share
  543. * the proxy
  544. */
  545. if (!replace && !label_is_stale(this)) {
  546. if (__aa_get_label(this))
  547. return this;
  548. } else
  549. __proxy_share(this, label);
  550. AA_BUG(!__label_replace(this, label));
  551. return aa_get_label(label);
  552. } else if (result < 0)
  553. new = &((*new)->rb_left);
  554. else /* (result > 0) */
  555. new = &((*new)->rb_right);
  556. }
  557. /* Add new node and rebalance tree. */
  558. rb_link_node(&label->node, parent, new);
  559. rb_insert_color(&label->node, &ls->root);
  560. label->flags |= FLAG_IN_TREE;
  561. return aa_get_label(label);
  562. }
  563. /**
  564. * __vec_find - find label that matches @vec in label set
  565. * @vec: vec of profiles to find matching label for (NOT NULL)
  566. * @n: length of @vec
  567. *
  568. * Requires: @vec_labelset(vec) lock held
  569. * caller to hold a valid ref on l
  570. *
  571. * Returns: ref counted @label if matching label is in tree
  572. * ref counted label that is equiv to @l in tree
  573. * else NULL if @vec equiv is not in tree
  574. */
  575. static struct aa_label *__vec_find(struct aa_profile **vec, int n)
  576. {
  577. struct rb_node *node;
  578. AA_BUG(!vec);
  579. AA_BUG(!*vec);
  580. AA_BUG(n <= 0);
  581. node = vec_labelset(vec, n)->root.rb_node;
  582. while (node) {
  583. struct aa_label *this = rb_entry(node, struct aa_label, node);
  584. int result = vec_cmp(this->vec, this->size, vec, n);
  585. if (result > 0)
  586. node = node->rb_left;
  587. else if (result < 0)
  588. node = node->rb_right;
  589. else
  590. return __aa_get_label(this);
  591. }
  592. return NULL;
  593. }
  594. /**
  595. * __label_find - find label @label in label set
  596. * @label: label to find (NOT NULL)
  597. *
  598. * Requires: labels_set(@label)->lock held
  599. * caller to hold a valid ref on l
  600. *
  601. * Returns: ref counted @label if @label is in tree OR
  602. * ref counted label that is equiv to @label in tree
  603. * else NULL if @label or equiv is not in tree
  604. */
  605. static struct aa_label *__label_find(struct aa_label *label)
  606. {
  607. AA_BUG(!label);
  608. return __vec_find(label->vec, label->size);
  609. }
  610. /**
  611. * aa_label_remove - remove a label from the labelset
  612. * @label: label to remove
  613. *
  614. * Returns: true if @label was removed from the tree
  615. * else @label was not in tree so it could not be removed
  616. */
  617. bool aa_label_remove(struct aa_label *label)
  618. {
  619. struct aa_labelset *ls = labels_set(label);
  620. unsigned long flags;
  621. bool res;
  622. AA_BUG(!ls);
  623. write_lock_irqsave(&ls->lock, flags);
  624. res = __label_remove(label, ns_unconfined(labels_ns(label)));
  625. write_unlock_irqrestore(&ls->lock, flags);
  626. return res;
  627. }
  628. /**
  629. * aa_label_replace - replace a label @old with a new version @new
  630. * @old: label to replace
  631. * @new: label replacing @old
  632. *
  633. * Returns: true if @old was in tree and replaced
  634. * else @old was not in tree, and @new was not inserted
  635. */
  636. bool aa_label_replace(struct aa_label *old, struct aa_label *new)
  637. {
  638. unsigned long flags;
  639. bool res;
  640. if (name_is_shared(old, new) && labels_ns(old) == labels_ns(new)) {
  641. write_lock_irqsave(&labels_set(old)->lock, flags);
  642. if (old->proxy != new->proxy)
  643. __proxy_share(old, new);
  644. else
  645. __aa_proxy_redirect(old, new);
  646. res = __label_replace(old, new);
  647. write_unlock_irqrestore(&labels_set(old)->lock, flags);
  648. } else {
  649. struct aa_label *l;
  650. struct aa_labelset *ls = labels_set(old);
  651. write_lock_irqsave(&ls->lock, flags);
  652. res = __label_remove(old, new);
  653. if (labels_ns(old) != labels_ns(new)) {
  654. write_unlock_irqrestore(&ls->lock, flags);
  655. ls = labels_set(new);
  656. write_lock_irqsave(&ls->lock, flags);
  657. }
  658. l = __label_insert(ls, new, true);
  659. res = (l == new);
  660. write_unlock_irqrestore(&ls->lock, flags);
  661. aa_put_label(l);
  662. }
  663. return res;
  664. }
  665. /**
  666. * vec_find - find label @l in label set
  667. * @vec: array of profiles to find equiv label for (NOT NULL)
  668. * @n: length of @vec
  669. *
  670. * Returns: refcounted label if @vec equiv is in tree
  671. * else NULL if @vec equiv is not in tree
  672. */
  673. static struct aa_label *vec_find(struct aa_profile **vec, int n)
  674. {
  675. struct aa_labelset *ls;
  676. struct aa_label *label;
  677. unsigned long flags;
  678. AA_BUG(!vec);
  679. AA_BUG(!*vec);
  680. AA_BUG(n <= 0);
  681. ls = vec_labelset(vec, n);
  682. read_lock_irqsave(&ls->lock, flags);
  683. label = __vec_find(vec, n);
  684. read_unlock_irqrestore(&ls->lock, flags);
  685. return label;
  686. }
  687. /* requires sort and merge done first */
  688. static struct aa_label *vec_create_and_insert_label(struct aa_profile **vec,
  689. int len, gfp_t gfp)
  690. {
  691. struct aa_label *label = NULL;
  692. struct aa_labelset *ls;
  693. unsigned long flags;
  694. struct aa_label *new;
  695. int i;
  696. AA_BUG(!vec);
  697. if (len == 1)
  698. return aa_get_label(&vec[0]->label);
  699. ls = labels_set(&vec[len - 1]->label);
  700. /* TODO: enable when read side is lockless
  701. * check if label exists before taking locks
  702. */
  703. new = aa_label_alloc(len, NULL, gfp);
  704. if (!new)
  705. return NULL;
  706. for (i = 0; i < len; i++)
  707. new->vec[i] = aa_get_profile(vec[i]);
  708. write_lock_irqsave(&ls->lock, flags);
  709. label = __label_insert(ls, new, false);
  710. write_unlock_irqrestore(&ls->lock, flags);
  711. label_free_or_put_new(label, new);
  712. return label;
  713. }
  714. struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
  715. gfp_t gfp)
  716. {
  717. struct aa_label *label = vec_find(vec, len);
  718. if (label)
  719. return label;
  720. return vec_create_and_insert_label(vec, len, gfp);
  721. }
  722. /**
  723. * aa_label_find - find label @label in label set
  724. * @label: label to find (NOT NULL)
  725. *
  726. * Requires: caller to hold a valid ref on l
  727. *
  728. * Returns: refcounted @label if @label is in tree
  729. * refcounted label that is equiv to @label in tree
  730. * else NULL if @label or equiv is not in tree
  731. */
  732. struct aa_label *aa_label_find(struct aa_label *label)
  733. {
  734. AA_BUG(!label);
  735. return vec_find(label->vec, label->size);
  736. }
  737. /**
  738. * aa_label_insert - insert label @label into @ls or return existing label
  739. * @ls - labelset to insert @label into
  740. * @label - label to insert
  741. *
  742. * Requires: caller to hold a valid ref on @label
  743. *
  744. * Returns: ref counted @label if successful in inserting @label
  745. * else ref counted equivalent label that is already in the set
  746. */
  747. struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label)
  748. {
  749. struct aa_label *l;
  750. unsigned long flags;
  751. AA_BUG(!ls);
  752. AA_BUG(!label);
  753. /* check if label exists before taking lock */
  754. if (!label_is_stale(label)) {
  755. read_lock_irqsave(&ls->lock, flags);
  756. l = __label_find(label);
  757. read_unlock_irqrestore(&ls->lock, flags);
  758. if (l)
  759. return l;
  760. }
  761. write_lock_irqsave(&ls->lock, flags);
  762. l = __label_insert(ls, label, false);
  763. write_unlock_irqrestore(&ls->lock, flags);
  764. return l;
  765. }
  766. /**
  767. * aa_label_next_in_merge - find the next profile when merging @a and @b
  768. * @I: label iterator
  769. * @a: label to merge
  770. * @b: label to merge
  771. *
  772. * Returns: next profile
  773. * else null if no more profiles
  774. */
  775. struct aa_profile *aa_label_next_in_merge(struct label_it *I,
  776. struct aa_label *a,
  777. struct aa_label *b)
  778. {
  779. AA_BUG(!a);
  780. AA_BUG(!b);
  781. AA_BUG(!I);
  782. AA_BUG(I->i < 0);
  783. AA_BUG(I->i > a->size);
  784. AA_BUG(I->j < 0);
  785. AA_BUG(I->j > b->size);
  786. if (I->i < a->size) {
  787. if (I->j < b->size) {
  788. int res = profile_cmp(a->vec[I->i], b->vec[I->j]);
  789. if (res > 0)
  790. return b->vec[(I->j)++];
  791. if (res == 0)
  792. (I->j)++;
  793. }
  794. return a->vec[(I->i)++];
  795. }
  796. if (I->j < b->size)
  797. return b->vec[(I->j)++];
  798. return NULL;
  799. }
  800. /**
  801. * label_merge_cmp - cmp of @a merging with @b against @z for set ordering
  802. * @a: label to merge then compare (NOT NULL)
  803. * @b: label to merge then compare (NOT NULL)
  804. * @z: label to compare merge against (NOT NULL)
  805. *
  806. * Assumes: using the most recent versions of @a, @b, and @z
  807. *
  808. * Returns: <0 if a < b
  809. * ==0 if a == b
  810. * >0 if a > b
  811. */
  812. static int label_merge_cmp(struct aa_label *a, struct aa_label *b,
  813. struct aa_label *z)
  814. {
  815. struct aa_profile *p = NULL;
  816. struct label_it i = { };
  817. int k;
  818. AA_BUG(!a);
  819. AA_BUG(!b);
  820. AA_BUG(!z);
  821. for (k = 0;
  822. k < z->size && (p = aa_label_next_in_merge(&i, a, b));
  823. k++) {
  824. int res = profile_cmp(p, z->vec[k]);
  825. if (res != 0)
  826. return res;
  827. }
  828. if (p)
  829. return 1;
  830. else if (k < z->size)
  831. return -1;
  832. return 0;
  833. }
  834. /**
  835. * label_merge_insert - create a new label by merging @a and @b
  836. * @new: preallocated label to merge into (NOT NULL)
  837. * @a: label to merge with @b (NOT NULL)
  838. * @b: label to merge with @a (NOT NULL)
  839. *
  840. * Requires: preallocated proxy
  841. *
  842. * Returns: ref counted label either @new if merge is unique
  843. * @a if @b is a subset of @a
  844. * @b if @a is a subset of @b
  845. *
  846. * NOTE: will not use @new if the merge results in @new == @a or @b
  847. *
  848. * Must be used within labelset write lock to avoid racing with
  849. * setting labels stale.
  850. */
  851. static struct aa_label *label_merge_insert(struct aa_label *new,
  852. struct aa_label *a,
  853. struct aa_label *b)
  854. {
  855. struct aa_label *label;
  856. struct aa_labelset *ls;
  857. struct aa_profile *next;
  858. struct label_it i;
  859. unsigned long flags;
  860. int k = 0, invcount = 0;
  861. bool stale = false;
  862. AA_BUG(!a);
  863. AA_BUG(a->size < 0);
  864. AA_BUG(!b);
  865. AA_BUG(b->size < 0);
  866. AA_BUG(!new);
  867. AA_BUG(new->size < a->size + b->size);
  868. label_for_each_in_merge(i, a, b, next) {
  869. AA_BUG(!next);
  870. if (profile_is_stale(next)) {
  871. new->vec[k] = aa_get_newest_profile(next);
  872. AA_BUG(!new->vec[k]->label.proxy);
  873. AA_BUG(!new->vec[k]->label.proxy->label);
  874. if (next->label.proxy != new->vec[k]->label.proxy)
  875. invcount++;
  876. k++;
  877. stale = true;
  878. } else
  879. new->vec[k++] = aa_get_profile(next);
  880. }
  881. /* set to actual size which is <= allocated len */
  882. new->size = k;
  883. new->vec[k] = NULL;
  884. if (invcount) {
  885. new->size -= aa_vec_unique(&new->vec[0], new->size,
  886. VEC_FLAG_TERMINATE);
  887. /* TODO: deal with reference labels */
  888. if (new->size == 1) {
  889. label = aa_get_label(&new->vec[0]->label);
  890. return label;
  891. }
  892. } else if (!stale) {
  893. /*
  894. * merge could be same as a || b, note: it is not possible
  895. * for new->size == a->size == b->size unless a == b
  896. */
  897. if (k == a->size)
  898. return aa_get_label(a);
  899. else if (k == b->size)
  900. return aa_get_label(b);
  901. }
  902. if (vec_unconfined(new->vec, new->size))
  903. new->flags |= FLAG_UNCONFINED;
  904. ls = labels_set(new);
  905. write_lock_irqsave(&ls->lock, flags);
  906. label = __label_insert(labels_set(new), new, false);
  907. write_unlock_irqrestore(&ls->lock, flags);
  908. return label;
  909. }
  910. /**
  911. * labelset_of_merge - find which labelset a merged label should be inserted
  912. * @a: label to merge and insert
  913. * @b: label to merge and insert
  914. *
  915. * Returns: labelset that the merged label should be inserted into
  916. */
  917. static struct aa_labelset *labelset_of_merge(struct aa_label *a,
  918. struct aa_label *b)
  919. {
  920. struct aa_ns *nsa = labels_ns(a);
  921. struct aa_ns *nsb = labels_ns(b);
  922. if (ns_cmp(nsa, nsb) <= 0)
  923. return &nsa->labels;
  924. return &nsb->labels;
  925. }
  926. /**
  927. * __label_find_merge - find label that is equiv to merge of @a and @b
  928. * @ls: set of labels to search (NOT NULL)
  929. * @a: label to merge with @b (NOT NULL)
  930. * @b: label to merge with @a (NOT NULL)
  931. *
  932. * Requires: ls->lock read_lock held
  933. *
  934. * Returns: ref counted label that is equiv to merge of @a and @b
  935. * else NULL if merge of @a and @b is not in set
  936. */
  937. static struct aa_label *__label_find_merge(struct aa_labelset *ls,
  938. struct aa_label *a,
  939. struct aa_label *b)
  940. {
  941. struct rb_node *node;
  942. AA_BUG(!ls);
  943. AA_BUG(!a);
  944. AA_BUG(!b);
  945. if (a == b)
  946. return __label_find(a);
  947. node = ls->root.rb_node;
  948. while (node) {
  949. struct aa_label *this = container_of(node, struct aa_label,
  950. node);
  951. int result = label_merge_cmp(a, b, this);
  952. if (result < 0)
  953. node = node->rb_left;
  954. else if (result > 0)
  955. node = node->rb_right;
  956. else
  957. return __aa_get_label(this);
  958. }
  959. return NULL;
  960. }
  961. /**
  962. * aa_label_find_merge - find label that is equiv to merge of @a and @b
  963. * @a: label to merge with @b (NOT NULL)
  964. * @b: label to merge with @a (NOT NULL)
  965. *
  966. * Requires: labels be fully constructed with a valid ns
  967. *
  968. * Returns: ref counted label that is equiv to merge of @a and @b
  969. * else NULL if merge of @a and @b is not in set
  970. */
  971. struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
  972. {
  973. struct aa_labelset *ls;
  974. struct aa_label *label, *ar = NULL, *br = NULL;
  975. unsigned long flags;
  976. AA_BUG(!a);
  977. AA_BUG(!b);
  978. if (label_is_stale(a))
  979. a = ar = aa_get_newest_label(a);
  980. if (label_is_stale(b))
  981. b = br = aa_get_newest_label(b);
  982. ls = labelset_of_merge(a, b);
  983. read_lock_irqsave(&ls->lock, flags);
  984. label = __label_find_merge(ls, a, b);
  985. read_unlock_irqrestore(&ls->lock, flags);
  986. aa_put_label(ar);
  987. aa_put_label(br);
  988. return label;
  989. }
  990. /**
  991. * aa_label_merge - attempt to insert new merged label of @a and @b
  992. * @ls: set of labels to insert label into (NOT NULL)
  993. * @a: label to merge with @b (NOT NULL)
  994. * @b: label to merge with @a (NOT NULL)
  995. * @gfp: memory allocation type
  996. *
  997. * Requires: caller to hold valid refs on @a and @b
  998. * labels be fully constructed with a valid ns
  999. *
  1000. * Returns: ref counted new label if successful in inserting merge of a & b
  1001. * else ref counted equivalent label that is already in the set.
  1002. * else NULL if could not create label (-ENOMEM)
  1003. */
  1004. struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
  1005. gfp_t gfp)
  1006. {
  1007. struct aa_label *label = NULL;
  1008. AA_BUG(!a);
  1009. AA_BUG(!b);
  1010. if (a == b)
  1011. return aa_get_newest_label(a);
  1012. /* TODO: enable when read side is lockless
  1013. * check if label exists before taking locks
  1014. if (!label_is_stale(a) && !label_is_stale(b))
  1015. label = aa_label_find_merge(a, b);
  1016. */
  1017. if (!label) {
  1018. struct aa_label *new;
  1019. a = aa_get_newest_label(a);
  1020. b = aa_get_newest_label(b);
  1021. /* could use label_merge_len(a, b), but requires double
  1022. * comparison for small savings
  1023. */
  1024. new = aa_label_alloc(a->size + b->size, NULL, gfp);
  1025. if (!new)
  1026. goto out;
  1027. label = label_merge_insert(new, a, b);
  1028. label_free_or_put_new(label, new);
  1029. out:
  1030. aa_put_label(a);
  1031. aa_put_label(b);
  1032. }
  1033. return label;
  1034. }
  1035. static inline bool label_is_visible(struct aa_profile *profile,
  1036. struct aa_label *label)
  1037. {
  1038. return aa_ns_visible(profile->ns, labels_ns(label), true);
  1039. }
  1040. /* match a profile and its associated ns component if needed
  1041. * Assumes visibility test has already been done.
  1042. * If a subns profile is not to be matched should be prescreened with
  1043. * visibility test.
  1044. */
  1045. static inline unsigned int match_component(struct aa_profile *profile,
  1046. struct aa_profile *tp,
  1047. unsigned int state)
  1048. {
  1049. const char *ns_name;
  1050. if (profile->ns == tp->ns)
  1051. return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
  1052. /* try matching with namespace name and then profile */
  1053. ns_name = aa_ns_name(profile->ns, tp->ns, true);
  1054. state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
  1055. state = aa_dfa_match(profile->policy.dfa, state, ns_name);
  1056. state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
  1057. return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
  1058. }
  1059. /**
  1060. * label_compound_match - find perms for full compound label
  1061. * @profile: profile to find perms for
  1062. * @label: label to check access permissions for
  1063. * @start: state to start match in
  1064. * @subns: whether to do permission checks on components in a subns
  1065. * @request: permissions to request
  1066. * @perms: perms struct to set
  1067. *
  1068. * Returns: 0 on success else ERROR
  1069. *
  1070. * For the label A//&B//&C this does the perm match for A//&B//&C
  1071. * @perms should be preinitialized with allperms OR a previous permission
  1072. * check to be stacked.
  1073. */
  1074. static int label_compound_match(struct aa_profile *profile,
  1075. struct aa_label *label,
  1076. unsigned int state, bool subns, u32 request,
  1077. struct aa_perms *perms)
  1078. {
  1079. struct aa_profile *tp;
  1080. struct label_it i;
  1081. /* find first subcomponent that is visible */
  1082. label_for_each(i, label, tp) {
  1083. if (!aa_ns_visible(profile->ns, tp->ns, subns))
  1084. continue;
  1085. state = match_component(profile, tp, state);
  1086. if (!state)
  1087. goto fail;
  1088. goto next;
  1089. }
  1090. /* no component visible */
  1091. *perms = allperms;
  1092. return 0;
  1093. next:
  1094. label_for_each_cont(i, label, tp) {
  1095. if (!aa_ns_visible(profile->ns, tp->ns, subns))
  1096. continue;
  1097. state = aa_dfa_match(profile->policy.dfa, state, "//&");
  1098. state = match_component(profile, tp, state);
  1099. if (!state)
  1100. goto fail;
  1101. }
  1102. aa_compute_perms(profile->policy.dfa, state, perms);
  1103. aa_apply_modes_to_perms(profile, perms);
  1104. if ((perms->allow & request) != request)
  1105. return -EACCES;
  1106. return 0;
  1107. fail:
  1108. *perms = nullperms;
  1109. return state;
  1110. }
  1111. /**
  1112. * label_components_match - find perms for all subcomponents of a label
  1113. * @profile: profile to find perms for
  1114. * @label: label to check access permissions for
  1115. * @start: state to start match in
  1116. * @subns: whether to do permission checks on components in a subns
  1117. * @request: permissions to request
  1118. * @perms: an initialized perms struct to add accumulation to
  1119. *
  1120. * Returns: 0 on success else ERROR
  1121. *
  1122. * For the label A//&B//&C this does the perm match for each of A and B and C
  1123. * @perms should be preinitialized with allperms OR a previous permission
  1124. * check to be stacked.
  1125. */
  1126. static int label_components_match(struct aa_profile *profile,
  1127. struct aa_label *label, unsigned int start,
  1128. bool subns, u32 request,
  1129. struct aa_perms *perms)
  1130. {
  1131. struct aa_profile *tp;
  1132. struct label_it i;
  1133. struct aa_perms tmp;
  1134. unsigned int state = 0;
  1135. /* find first subcomponent to test */
  1136. label_for_each(i, label, tp) {
  1137. if (!aa_ns_visible(profile->ns, tp->ns, subns))
  1138. continue;
  1139. state = match_component(profile, tp, start);
  1140. if (!state)
  1141. goto fail;
  1142. goto next;
  1143. }
  1144. /* no subcomponents visible - no change in perms */
  1145. return 0;
  1146. next:
  1147. aa_compute_perms(profile->policy.dfa, state, &tmp);
  1148. aa_apply_modes_to_perms(profile, &tmp);
  1149. aa_perms_accum(perms, &tmp);
  1150. label_for_each_cont(i, label, tp) {
  1151. if (!aa_ns_visible(profile->ns, tp->ns, subns))
  1152. continue;
  1153. state = match_component(profile, tp, start);
  1154. if (!state)
  1155. goto fail;
  1156. aa_compute_perms(profile->policy.dfa, state, &tmp);
  1157. aa_apply_modes_to_perms(profile, &tmp);
  1158. aa_perms_accum(perms, &tmp);
  1159. }
  1160. if ((perms->allow & request) != request)
  1161. return -EACCES;
  1162. return 0;
  1163. fail:
  1164. *perms = nullperms;
  1165. return -EACCES;
  1166. }
  1167. /**
  1168. * aa_label_match - do a multi-component label match
  1169. * @profile: profile to match against (NOT NULL)
  1170. * @label: label to match (NOT NULL)
  1171. * @state: state to start in
  1172. * @subns: whether to match subns components
  1173. * @request: permission request
  1174. * @perms: Returns computed perms (NOT NULL)
  1175. *
  1176. * Returns: the state the match finished in, may be the none matching state
  1177. */
  1178. int aa_label_match(struct aa_profile *profile, struct aa_label *label,
  1179. unsigned int state, bool subns, u32 request,
  1180. struct aa_perms *perms)
  1181. {
  1182. int error = label_compound_match(profile, label, state, subns, request,
  1183. perms);
  1184. if (!error)
  1185. return error;
  1186. *perms = allperms;
  1187. return label_components_match(profile, label, state, subns, request,
  1188. perms);
  1189. }
  1190. /**
  1191. * aa_update_label_name - update a label to have a stored name
  1192. * @ns: ns being viewed from (NOT NULL)
  1193. * @label: label to update (NOT NULL)
  1194. * @gfp: type of memory allocation
  1195. *
  1196. * Requires: labels_set(label) not locked in caller
  1197. *
  1198. * note: only updates the label name if it does not have a name already
  1199. * and if it is in the labelset
  1200. */
  1201. bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
  1202. {
  1203. struct aa_labelset *ls;
  1204. unsigned long flags;
  1205. char __counted *name;
  1206. bool res = false;
  1207. AA_BUG(!ns);
  1208. AA_BUG(!label);
  1209. if (label->hname || labels_ns(label) != ns)
  1210. return res;
  1211. if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) == -1)
  1212. return res;
  1213. ls = labels_set(label);
  1214. write_lock_irqsave(&ls->lock, flags);
  1215. if (!label->hname && label->flags & FLAG_IN_TREE) {
  1216. label->hname = name;
  1217. res = true;
  1218. } else
  1219. aa_put_str(name);
  1220. write_unlock_irqrestore(&ls->lock, flags);
  1221. return res;
  1222. }
  1223. /*
  1224. * cached label name is present and visible
  1225. * @label->hname only exists if label is namespace hierachical
  1226. */
  1227. static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
  1228. int flags)
  1229. {
  1230. if (label->hname && (!ns || labels_ns(label) == ns) &&
  1231. !(flags & ~FLAG_SHOW_MODE))
  1232. return true;
  1233. return false;
  1234. }
  1235. /* helper macro for snprint routines */
  1236. #define update_for_len(total, len, size, str) \
  1237. do { \
  1238. AA_BUG(len < 0); \
  1239. total += len; \
  1240. len = min(len, size); \
  1241. size -= len; \
  1242. str += len; \
  1243. } while (0)
  1244. /**
  1245. * aa_profile_snxprint - print a profile name to a buffer
  1246. * @str: buffer to write to. (MAY BE NULL if @size == 0)
  1247. * @size: size of buffer
  1248. * @view: namespace profile is being viewed from
  1249. * @profile: profile to view (NOT NULL)
  1250. * @flags: whether to include the mode string
  1251. * @prev_ns: last ns printed when used in compound print
  1252. *
  1253. * Returns: size of name written or would be written if larger than
  1254. * available buffer
  1255. *
  1256. * Note: will not print anything if the profile is not visible
  1257. */
  1258. static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
  1259. struct aa_profile *profile, int flags,
  1260. struct aa_ns **prev_ns)
  1261. {
  1262. const char *ns_name = NULL;
  1263. AA_BUG(!str && size != 0);
  1264. AA_BUG(!profile);
  1265. if (!view)
  1266. view = profiles_ns(profile);
  1267. if (view != profile->ns &&
  1268. (!prev_ns || (*prev_ns != profile->ns))) {
  1269. if (prev_ns)
  1270. *prev_ns = profile->ns;
  1271. ns_name = aa_ns_name(view, profile->ns,
  1272. flags & FLAG_VIEW_SUBNS);
  1273. if (ns_name == aa_hidden_ns_name) {
  1274. if (flags & FLAG_HIDDEN_UNCONFINED)
  1275. return snprintf(str, size, "%s", "unconfined");
  1276. return snprintf(str, size, "%s", ns_name);
  1277. }
  1278. }
  1279. if ((flags & FLAG_SHOW_MODE) && profile != profile->ns->unconfined) {
  1280. const char *modestr = aa_profile_mode_names[profile->mode];
  1281. if (ns_name)
  1282. return snprintf(str, size, ":%s:%s (%s)", ns_name,
  1283. profile->base.hname, modestr);
  1284. return snprintf(str, size, "%s (%s)", profile->base.hname,
  1285. modestr);
  1286. }
  1287. if (ns_name)
  1288. return snprintf(str, size, ":%s:%s", ns_name,
  1289. profile->base.hname);
  1290. return snprintf(str, size, "%s", profile->base.hname);
  1291. }
  1292. static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
  1293. int flags)
  1294. {
  1295. struct aa_profile *profile;
  1296. struct label_it i;
  1297. int mode = -1, count = 0;
  1298. label_for_each(i, label, profile) {
  1299. if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
  1300. if (profile->mode == APPARMOR_UNCONFINED)
  1301. /* special case unconfined so stacks with
  1302. * unconfined don't report as mixed. ie.
  1303. * profile_foo//&:ns1:unconfined (mixed)
  1304. */
  1305. continue;
  1306. count++;
  1307. if (mode == -1)
  1308. mode = profile->mode;
  1309. else if (mode != profile->mode)
  1310. return "mixed";
  1311. }
  1312. }
  1313. if (count == 0)
  1314. return "-";
  1315. if (mode == -1)
  1316. /* everything was unconfined */
  1317. mode = APPARMOR_UNCONFINED;
  1318. return aa_profile_mode_names[mode];
  1319. }
  1320. /* if any visible label is not unconfined the display_mode returns true */
  1321. static inline bool display_mode(struct aa_ns *ns, struct aa_label *label,
  1322. int flags)
  1323. {
  1324. if ((flags & FLAG_SHOW_MODE)) {
  1325. struct aa_profile *profile;
  1326. struct label_it i;
  1327. label_for_each(i, label, profile) {
  1328. if (aa_ns_visible(ns, profile->ns,
  1329. flags & FLAG_VIEW_SUBNS) &&
  1330. profile != profile->ns->unconfined)
  1331. return true;
  1332. }
  1333. /* only ns->unconfined in set of profiles in ns */
  1334. return false;
  1335. }
  1336. return false;
  1337. }
  1338. /**
  1339. * aa_label_snxprint - print a label name to a string buffer
  1340. * @str: buffer to write to. (MAY BE NULL if @size == 0)
  1341. * @size: size of buffer
  1342. * @ns: namespace profile is being viewed from
  1343. * @label: label to view (NOT NULL)
  1344. * @flags: whether to include the mode string
  1345. *
  1346. * Returns: size of name written or would be written if larger than
  1347. * available buffer
  1348. *
  1349. * Note: labels do not have to be strictly hierarchical to the ns as
  1350. * objects may be shared across different namespaces and thus
  1351. * pickup labeling from each ns. If a particular part of the
  1352. * label is not visible it will just be excluded. And if none
  1353. * of the label is visible "---" will be used.
  1354. */
  1355. int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
  1356. struct aa_label *label, int flags)
  1357. {
  1358. struct aa_profile *profile;
  1359. struct aa_ns *prev_ns = NULL;
  1360. struct label_it i;
  1361. int count = 0, total = 0;
  1362. size_t len;
  1363. AA_BUG(!str && size != 0);
  1364. AA_BUG(!label);
  1365. if (flags & FLAG_ABS_ROOT) {
  1366. ns = root_ns;
  1367. len = snprintf(str, size, "=");
  1368. update_for_len(total, len, size, str);
  1369. } else if (!ns) {
  1370. ns = labels_ns(label);
  1371. }
  1372. label_for_each(i, label, profile) {
  1373. if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
  1374. if (count > 0) {
  1375. len = snprintf(str, size, "//&");
  1376. update_for_len(total, len, size, str);
  1377. }
  1378. len = aa_profile_snxprint(str, size, ns, profile,
  1379. flags & FLAG_VIEW_SUBNS,
  1380. &prev_ns);
  1381. update_for_len(total, len, size, str);
  1382. count++;
  1383. }
  1384. }
  1385. if (count == 0) {
  1386. if (flags & FLAG_HIDDEN_UNCONFINED)
  1387. return snprintf(str, size, "%s", "unconfined");
  1388. return snprintf(str, size, "%s", aa_hidden_ns_name);
  1389. }
  1390. /* count == 1 && ... is for backwards compat where the mode
  1391. * is not displayed for 'unconfined' in the current ns
  1392. */
  1393. if (display_mode(ns, label, flags)) {
  1394. len = snprintf(str, size, " (%s)",
  1395. label_modename(ns, label, flags));
  1396. update_for_len(total, len, size, str);
  1397. }
  1398. return total;
  1399. }
  1400. #undef update_for_len
  1401. /**
  1402. * aa_label_asxprint - allocate a string buffer and print label into it
  1403. * @strp: Returns - the allocated buffer with the label name. (NOT NULL)
  1404. * @ns: namespace profile is being viewed from
  1405. * @label: label to view (NOT NULL)
  1406. * @flags: flags controlling what label info is printed
  1407. * @gfp: kernel memory allocation type
  1408. *
  1409. * Returns: size of name written or would be written if larger than
  1410. * available buffer
  1411. */
  1412. int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
  1413. int flags, gfp_t gfp)
  1414. {
  1415. int size;
  1416. AA_BUG(!strp);
  1417. AA_BUG(!label);
  1418. size = aa_label_snxprint(NULL, 0, ns, label, flags);
  1419. if (size < 0)
  1420. return size;
  1421. *strp = kmalloc(size + 1, gfp);
  1422. if (!*strp)
  1423. return -ENOMEM;
  1424. return aa_label_snxprint(*strp, size + 1, ns, label, flags);
  1425. }
  1426. /**
  1427. * aa_label_acntsxprint - allocate a __counted string buffer and print label
  1428. * @strp: buffer to write to. (MAY BE NULL if @size == 0)
  1429. * @ns: namespace profile is being viewed from
  1430. * @label: label to view (NOT NULL)
  1431. * @flags: flags controlling what label info is printed
  1432. * @gfp: kernel memory allocation type
  1433. *
  1434. * Returns: size of name written or would be written if larger than
  1435. * available buffer
  1436. */
  1437. int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
  1438. struct aa_label *label, int flags, gfp_t gfp)
  1439. {
  1440. int size;
  1441. AA_BUG(!strp);
  1442. AA_BUG(!label);
  1443. size = aa_label_snxprint(NULL, 0, ns, label, flags);
  1444. if (size < 0)
  1445. return size;
  1446. *strp = aa_str_alloc(size + 1, gfp);
  1447. if (!*strp)
  1448. return -ENOMEM;
  1449. return aa_label_snxprint(*strp, size + 1, ns, label, flags);
  1450. }
void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
		     struct aa_label *label, int flags, gfp_t gfp)
{
	const char *str;
	char *name = NULL;
	int len;

	AA_BUG(!ab);
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags) ||
	    display_mode(ns, label, flags)) {
		len = aa_label_asxprint(&name, ns, label, flags, gfp);
		if (len < 0) {
			AA_DEBUG("label print error");
			return;
		}
		str = name;
	} else {
		str = (char *) label->hname;
		len = strlen(str);
	}
	if (audit_string_contains_control(str, len))
		audit_log_n_hex(ab, str, len);
	else
		audit_log_n_string(ab, str, len);

	kfree(name);
}
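
/**
 * aa_label_seq_xprint - print a label, as seen from @ns, to a seq_file
 * @f: seq_file to print to (NOT NULL)
 * @ns: namespace the label is being viewed from
 * @label: label to print (NOT NULL)
 * @flags: flags controlling what label info is printed
 * @gfp: kernel memory allocation type
 */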
void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
			 struct aa_label *label, int flags, gfp_t gfp)
{
	AA_BUG(!f);
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags)) {
		char *str;
		int len;

		len = aa_label_asxprint(&str, ns, label, flags, gfp);
		if (len < 0) {
			AA_DEBUG("label print error");
			return;
		}
		seq_printf(f, "%s", str);
		kfree(str);
	} else if (display_mode(ns, label, flags))
		seq_printf(f, "%s (%s)", label->hname,
			   label_modename(ns, label, flags));
	else
		seq_printf(f, "%s", label->hname);
}
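
/**
 * aa_label_xprintk - print a label, as seen from @ns, to the kernel log
 * @ns: namespace the label is being viewed from
 * @label: label to print (NOT NULL)
 * @flags: flags controlling what label info is printed
 * @gfp: kernel memory allocation type
 */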
void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
		      gfp_t gfp)
{
	AA_BUG(!label);

	if (!use_label_hname(ns, label, flags)) {
		char *str;
		int len;

		len = aa_label_asxprint(&str, ns, label, flags, gfp);
		if (len < 0) {
			AA_DEBUG("label print error");
			return;
		}
		pr_info("%s", str);
		kfree(str);
	} else if (display_mode(ns, label, flags))
		pr_info("%s (%s)", label->hname,
			label_modename(ns, label, flags));
	else
		pr_info("%s", label->hname);
}
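
/**
 * aa_label_audit - print a label to an audit buffer from the current ns
 * @ab: audit buffer to record to (NOT NULL)
 * @label: label to print (NOT NULL)
 * @gfp: kernel memory allocation type
 */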
void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp)
{
	struct aa_ns *ns = aa_get_current_ns();

	aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp);
	aa_put_ns(ns);
}
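
/**
 * aa_label_seq_print - print a label to a seq_file from the current ns
 * @f: seq_file to print to (NOT NULL)
 * @label: label to print (NOT NULL)
 * @gfp: kernel memory allocation type
 */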
void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp)
{
	struct aa_ns *ns = aa_get_current_ns();

	aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp);
	aa_put_ns(ns);
}
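
/**
 * aa_label_printk - print a label to the kernel log from the current ns
 * @label: label to print (NOT NULL)
 * @gfp: kernel memory allocation type
 */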
void aa_label_printk(struct aa_label *label, gfp_t gfp)
{
	struct aa_ns *ns = aa_get_current_ns();

	aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp);
	aa_put_ns(ns);
}
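
/**
 * label_count_str_entries - count the profile entries in a label string
 * @str: label string to scan (NOT NULL)
 *
 * Returns: the number of separator ("//&") delimited entries in @str
 */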
static int label_count_str_entries(const char *str)
{
	const char *split;
	int count = 1;

	AA_BUG(!str);

	for (split = aa_label_str_split(str); split;
	     split = aa_label_str_split(str)) {
		count++;
		str = split + 3;
	}

	return count;
}

/*
 * Ensure stacks with components like
 *	:ns:A//&B
 * have :ns: applied to both 'A' and 'B' by making the lookup relative
 * to the base if the lookup specifies an ns, else making the stacked lookup
 * relative to the last embedded ns in the string.
 */
static struct aa_profile *fqlookupn_profile(struct aa_label *base,
					    struct aa_label *currentbase,
					    const char *str, size_t n)
{
	const char *first = skipn_spaces(str, n);

	if (first && *first == ':')
		return aa_fqlookupn_profile(base, str, n);

	return aa_fqlookupn_profile(currentbase, str, n);
}

/**
 * aa_label_parse - parse, validate and convert a text string to a label
 * @base: base label to use for lookups (NOT NULL)
 * @str: null terminated text string (NOT NULL)
 * @gfp: allocation type
 * @create: true if compound labels should be created when they don't exist
 * @force_stack: true if the result should be stacked on @base even without
 *               a leading '&'
 *
 * Returns: the matching refcounted label if present
 *          else an ERR_PTR
 */
struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
				gfp_t gfp, bool create, bool force_stack)
{
	DEFINE_VEC(profile, vec);
	struct aa_label *label, *currbase = base;
	int i, len, stack = 0, error;
	const char *split;

	AA_BUG(!base);
	AA_BUG(!str);

	str = skip_spaces(str);
	len = label_count_str_entries(str);
	if (*str == '&' || force_stack) {
		/* stack on top of base */
		stack = base->size;
		len += stack;
		if (*str == '&')
			str++;
	}
	if (*str == '=')
		base = &root_ns->unconfined->label;

	error = vec_setup(profile, vec, len, gfp);
	if (error)
		return ERR_PTR(error);

	for (i = 0; i < stack; i++)
		vec[i] = aa_get_profile(base->vec[i]);

	for (split = aa_label_str_split(str), i = stack;
	     split && i < len; i++) {
		vec[i] = fqlookupn_profile(base, currbase, str, split - str);
		if (!vec[i])
			goto fail;
		/*
		 * if component specified a new ns it becomes the new base
		 * so that subsequent lookups are relative to it
		 */
		if (vec[i]->ns != labels_ns(currbase))
			currbase = &vec[i]->label;
		str = split + 3;
		split = aa_label_str_split(str);
	}
	/* last element doesn't have a split */
	if (i < len) {
		vec[i] = fqlookupn_profile(base, currbase, str, strlen(str));
		if (!vec[i])
			goto fail;
	}
	if (len == 1)
		/* no need to free vec as len < LOCAL_VEC_ENTRIES */
		return &vec[0]->label;

	len -= aa_vec_unique(vec, len, VEC_FLAG_TERMINATE);
	/* TODO: deal with reference labels */
	if (len == 1) {
		label = aa_get_label(&vec[0]->label);
		goto out;
	}

	if (create)
		label = aa_vec_find_or_create_label(vec, len, gfp);
	else
		label = vec_find(vec, len);
	if (!label)
		goto fail;

out:
	/* use adjusted len from after vec_unique, not original */
	vec_cleanup(profile, vec, len);

	return label;

fail:
	label = ERR_PTR(-ENOENT);
	goto out;
}

/**
 * aa_labelset_destroy - remove all labels from the label set
 * @ls: label set to cleanup (NOT NULL)
 *
 * Labels that are removed from the set may still exist beyond the set
 * being destroyed depending on their reference counting.
 */
void aa_labelset_destroy(struct aa_labelset *ls)
{
	struct rb_node *node;
	unsigned long flags;

	AA_BUG(!ls);

	write_lock_irqsave(&ls->lock, flags);
	for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
		struct aa_label *this = rb_entry(node, struct aa_label, node);

		if (labels_ns(this) != root_ns)
			__label_remove(this,
				       ns_unconfined(labels_ns(this)->parent));
		else
			__label_remove(this, NULL);
	}
	write_unlock_irqrestore(&ls->lock, flags);
}

/**
 * aa_labelset_init - initialize a label set
 * @ls: labelset to init (NOT NULL)
 */
void aa_labelset_init(struct aa_labelset *ls)
{
	AA_BUG(!ls);

	rwlock_init(&ls->lock);
	ls->root = RB_ROOT;
}
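
/**
 * labelset_next_stale - find the next stale label in a label set
 * @ls: label set to scan (NOT NULL)
 *
 * Returns: a refcounted label from @ls that is stale or has a stale
 *          component, else NULL if none could be found or referenced
 */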
static struct aa_label *labelset_next_stale(struct aa_labelset *ls)
{
	struct aa_label *label;
	struct rb_node *node;
	unsigned long flags;

	AA_BUG(!ls);

	read_lock_irqsave(&ls->lock, flags);
	__labelset_for_each(ls, node) {
		label = rb_entry(node, struct aa_label, node);
		if ((label_is_stale(label) ||
		     vec_is_stale(label->vec, label->size)) &&
		    __aa_get_label(label))
			goto out;
	}
	label = NULL;

out:
	read_unlock_irqrestore(&ls->lock, flags);

	return label;
}

/**
 * __label_update - insert updated version of @label into labelset
 * @label: the label to update/replace
 *
 * Returns: new label that is up to date
 *          else NULL on failure
 *
 * Requires: labels_ns(@label) lock be held
 *
 * Note: worst case is the stale @label does not get updated and has
 *       to be updated at a later time.
 */
static struct aa_label *__label_update(struct aa_label *label)
{
	struct aa_label *new, *tmp;
	struct aa_labelset *ls;
	unsigned long flags;
	int i, invcount = 0;

	AA_BUG(!label);
	AA_BUG(!mutex_is_locked(&labels_ns(label)->lock));

	new = aa_label_alloc(label->size, label->proxy, GFP_KERNEL);
	if (!new)
		return NULL;

	/*
	 * While holding the ns lock stops profile replacement, removal,
	 * and label updates, label merging and removal can still occur.
	 */
	ls = labels_set(label);
	write_lock_irqsave(&ls->lock, flags);
	for (i = 0; i < label->size; i++) {
		AA_BUG(!label->vec[i]);
		new->vec[i] = aa_get_newest_profile(label->vec[i]);
		AA_BUG(!new->vec[i]);
		AA_BUG(!new->vec[i]->label.proxy);
		AA_BUG(!new->vec[i]->label.proxy->label);
		if (new->vec[i]->label.proxy != label->vec[i]->label.proxy)
			invcount++;
	}

	/* the stale label was updated by being removed/renamed from the labelset */
	if (invcount) {
		new->size -= aa_vec_unique(&new->vec[0], new->size,
					   VEC_FLAG_TERMINATE);
		/* TODO: deal with reference labels */
		if (new->size == 1) {
			tmp = aa_get_label(&new->vec[0]->label);
			AA_BUG(tmp == label);
			goto remove;
		}
		if (labels_set(label) != labels_set(new)) {
			write_unlock_irqrestore(&ls->lock, flags);
			tmp = aa_label_insert(labels_set(new), new);
			write_lock_irqsave(&ls->lock, flags);
			goto remove;
		}
	} else
		AA_BUG(labels_ns(label) != labels_ns(new));

	tmp = __label_insert(labels_set(label), new, true);
remove:
	/* ensure label is removed, and redirected correctly */
	__label_remove(label, tmp);
	write_unlock_irqrestore(&ls->lock, flags);
	label_free_or_put_new(tmp, new);

	return tmp;
}

/**
 * __labelset_update - update labels in @ns
 * @ns: namespace to update labels in (NOT NULL)
 *
 * Requires: @ns lock be held
 *
 * Walk the labelset ensuring that all labels are up to date and valid.
 * Any label that has a stale component is marked stale and replaced by
 * an updated version.
 *
 * If failures happen due to memory pressure then stale labels will
 * be left in place until the next pass.
 */
static void __labelset_update(struct aa_ns *ns)
{
	struct aa_label *label;

	AA_BUG(!ns);
	AA_BUG(!mutex_is_locked(&ns->lock));

	do {
		label = labelset_next_stale(&ns->labels);
		if (label) {
			struct aa_label *l = __label_update(label);

			aa_put_label(l);
			aa_put_label(label);
		}
	} while (label);
}

/**
 * __aa_labelset_update_subtree - update all labels with a stale component
 * @ns: ns to start update at (NOT NULL)
 *
 * Requires: @ns lock be held
 *
 * Updates labels with stale components in @ns and any child namespaces.
 */
void __aa_labelset_update_subtree(struct aa_ns *ns)
{
	struct aa_ns *child;

	AA_BUG(!ns);
	AA_BUG(!mutex_is_locked(&ns->lock));

	__labelset_update(ns);

	list_for_each_entry(child, &ns->sub_ns, base.list) {
		mutex_lock_nested(&child->lock, child->level);
		__aa_labelset_update_subtree(child);
		mutex_unlock(&child->lock);
	}
}