dir.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653
/*
 * fs/kernfs/dir.c - kernfs directory implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */
  10. #include <linux/sched.h>
  11. #include <linux/fs.h>
  12. #include <linux/namei.h>
  13. #include <linux/idr.h>
  14. #include <linux/slab.h>
  15. #include <linux/security.h>
  16. #include <linux/hash.h>
  17. #include "kernfs-internal.h"
/* serializes hierarchy modifications and lookups (see lockdep asserts below) */
DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by rename_lock */

/* map an rb_node embedded in a kernfs_node back to the kernfs_node */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
  22. static bool kernfs_active(struct kernfs_node *kn)
  23. {
  24. lockdep_assert_held(&kernfs_mutex);
  25. return atomic_read(&kn->active) >= 0;
  26. }
  27. static bool kernfs_lockdep(struct kernfs_node *kn)
  28. {
  29. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  30. return kn->flags & KERNFS_LOCKDEP;
  31. #else
  32. return false;
  33. #endif
  34. }
  35. static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
  36. {
  37. return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
  38. }
  39. /* kernfs_node_depth - compute depth from @from to @to */
  40. static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
  41. {
  42. size_t depth = 0;
  43. while (to->parent && to != from) {
  44. depth++;
  45. to = to->parent;
  46. }
  47. return depth;
  48. }
  49. static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
  50. struct kernfs_node *b)
  51. {
  52. size_t da, db;
  53. struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
  54. if (ra != rb)
  55. return NULL;
  56. da = kernfs_depth(ra->kn, a);
  57. db = kernfs_depth(rb->kn, b);
  58. while (da > db) {
  59. a = a->parent;
  60. da--;
  61. }
  62. while (db > da) {
  63. b = b->parent;
  64. db--;
  65. }
  66. /* worst case b and a will be the same at root */
  67. while (b != a) {
  68. b = b->parent;
  69. a = a->parent;
  70. }
  71. return a;
  72. }
/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_to: kernfs node to which path is needed
 * @kn_from: kernfs node which should be treated as root for the path
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * We need to handle couple of scenarios here:
 * [1] when @kn_from is an ancestor of @kn_to at some level
 * kn_from: /n1/n2/n3
 * kn_to: /n1/n2/n3/n4/n5
 * result: /n4/n5
 *
 * [2] when @kn_from is on a different hierarchy and we need to find common
 * ancestor between @kn_from and @kn_to.
 * kn_from: /n1/n2/n3/n4
 * kn_to: /n1/n2/n5
 * result: /../../n5
 * OR
 * kn_from: /n1/n2/n3/n4/n5 [depth=5]
 * kn_to: /n1/n2/n3 [depth=3]
 * result: /../..
 *
 * return value: length of the string. If greater than buflen,
 * then contents of buf are undefined. On error, -1 is returned.
 * Caller must hold kernfs_rename_lock.
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0, nlen = 0;
	char *p;
	int i;

	/* NULL @kn_from means "relative to the root of @kn_to's hierarchy" */
	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -1;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	if (buf)
		buf[0] = '\0';

	/* one "/.." per level between @kn_from and the common ancestor */
	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* Calculate how many bytes we need for the rest */
	for (kn = kn_to; kn != common; kn = kn->parent)
		nlen += strlen(kn->name) + 1;

	if (len + nlen >= buflen)
		return len + nlen;

	/* build the descending part right-to-left, '/' before each name */
	p = buf + len + nlen;
	*p = '\0';
	for (kn = kn_to; kn != common; kn = kn->parent) {
		size_t tmp = strlen(kn->name);
		p -= tmp;
		memcpy(p, kn->name, tmp);
		*(--p) = '/';
	}

	return len + nlen;
}
  138. /**
  139. * kernfs_name - obtain the name of a given node
  140. * @kn: kernfs_node of interest
  141. * @buf: buffer to copy @kn's name into
  142. * @buflen: size of @buf
  143. *
  144. * Copies the name of @kn into @buf of @buflen bytes. The behavior is
  145. * similar to strlcpy(). It returns the length of @kn's name and if @buf
  146. * isn't long enough, it's filled upto @buflen-1 and nul terminated.
  147. *
  148. * This function can be called from any context.
  149. */
  150. int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
  151. {
  152. unsigned long flags;
  153. int ret;
  154. spin_lock_irqsave(&kernfs_rename_lock, flags);
  155. ret = kernfs_name_locked(kn, buf, buflen);
  156. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  157. return ret;
  158. }
/**
 * kernfs_path_len - determine the length of the full path of a given node
 * @kn: kernfs_node of interest
 *
 * The returned length doesn't include the space for the terminating '\0'.
 */
size_t kernfs_path_len(struct kernfs_node *kn)
{
	size_t len = 0;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);

	/*
	 * Count one "/<name>" segment per node, stopping before the root
	 * node (whose parent is NULL).  Note the do/while: if @kn itself is
	 * the root, its (empty) name still contributes one segment.
	 */
	do {
		len += strlen(kn->name) + 1;
		kn = kn->parent;
	} while (kn && kn->parent);

	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return len;
}
  177. /**
  178. * kernfs_path_from_node - build path of node @to relative to @from.
  179. * @from: parent kernfs_node relative to which we need to build the path
  180. * @to: kernfs_node of interest
  181. * @buf: buffer to copy @to's path into
  182. * @buflen: size of @buf
  183. *
  184. * Builds @to's path relative to @from in @buf. @from and @to must
  185. * be on the same kernfs-root. If @from is not parent of @to, then a relative
  186. * path (which includes '..'s) as needed to reach from @from to @to is
  187. * returned.
  188. *
  189. * If @buf isn't long enough, the return value will be greater than @buflen
  190. * and @buf contents are undefined.
  191. */
  192. int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
  193. char *buf, size_t buflen)
  194. {
  195. unsigned long flags;
  196. int ret;
  197. spin_lock_irqsave(&kernfs_rename_lock, flags);
  198. ret = kernfs_path_from_node_locked(to, from, buf, buflen);
  199. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  200. return ret;
  201. }
  202. EXPORT_SYMBOL_GPL(kernfs_path_from_node);
  203. /**
  204. * kernfs_path - build full path of a given node
  205. * @kn: kernfs_node of interest
  206. * @buf: buffer to copy @kn's name into
  207. * @buflen: size of @buf
  208. *
  209. * Builds and returns the full path of @kn in @buf of @buflen bytes. The
  210. * path is built from the end of @buf so the returned pointer usually
  211. * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
  212. * and %NULL is returned.
  213. */
  214. char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
  215. {
  216. int ret;
  217. ret = kernfs_path_from_node(kn, NULL, buf, buflen);
  218. if (ret < 0 || ret >= buflen)
  219. return NULL;
  220. return buf;
  221. }
  222. EXPORT_SYMBOL_GPL(kernfs_path);
  223. /**
  224. * pr_cont_kernfs_name - pr_cont name of a kernfs_node
  225. * @kn: kernfs_node of interest
  226. *
  227. * This function can be called from any context.
  228. */
  229. void pr_cont_kernfs_name(struct kernfs_node *kn)
  230. {
  231. unsigned long flags;
  232. spin_lock_irqsave(&kernfs_rename_lock, flags);
  233. kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
  234. pr_cont("%s", kernfs_pr_cont_buf);
  235. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  236. }
  237. /**
  238. * pr_cont_kernfs_path - pr_cont path of a kernfs_node
  239. * @kn: kernfs_node of interest
  240. *
  241. * This function can be called from any context.
  242. */
  243. void pr_cont_kernfs_path(struct kernfs_node *kn)
  244. {
  245. unsigned long flags;
  246. int sz;
  247. spin_lock_irqsave(&kernfs_rename_lock, flags);
  248. sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
  249. sizeof(kernfs_pr_cont_buf));
  250. if (sz < 0) {
  251. pr_cont("(error)");
  252. goto out;
  253. }
  254. if (sz >= sizeof(kernfs_pr_cont_buf)) {
  255. pr_cont("(name too long)");
  256. goto out;
  257. }
  258. pr_cont("%s", kernfs_pr_cont_buf);
  259. out:
  260. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  261. }
  262. /**
  263. * kernfs_get_parent - determine the parent node and pin it
  264. * @kn: kernfs_node of interest
  265. *
  266. * Determines @kn's parent, pins and returns it. This function can be
  267. * called from any context.
  268. */
  269. struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
  270. {
  271. struct kernfs_node *parent;
  272. unsigned long flags;
  273. spin_lock_irqsave(&kernfs_rename_lock, flags);
  274. parent = kn->parent;
  275. kernfs_get(parent);
  276. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  277. return parent;
  278. }
  279. /**
  280. * kernfs_name_hash
  281. * @name: Null terminated string to hash
  282. * @ns: Namespace tag to hash
  283. *
  284. * Returns 31 bit hash of ns + name (so it fits in an off_t )
  285. */
  286. static unsigned int kernfs_name_hash(const char *name, const void *ns)
  287. {
  288. unsigned long hash = init_name_hash(ns);
  289. unsigned int len = strlen(name);
  290. while (len--)
  291. hash = partial_name_hash(*name++, hash);
  292. hash = end_name_hash(hash);
  293. hash &= 0x7fffffffU;
  294. /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
  295. if (hash < 2)
  296. hash += 2;
  297. if (hash >= INT_MAX)
  298. hash = INT_MAX - 1;
  299. return hash;
  300. }
  301. static int kernfs_name_compare(unsigned int hash, const char *name,
  302. const void *ns, const struct kernfs_node *kn)
  303. {
  304. if (hash < kn->hash)
  305. return -1;
  306. if (hash > kn->hash)
  307. return 1;
  308. if (ns < kn->ns)
  309. return -1;
  310. if (ns > kn->ns)
  311. return 1;
  312. return strcmp(name, kn->name);
  313. }
/* rbtree comparator: order @left against @right by hash, then ns, then name */
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}
  319. /**
  320. * kernfs_link_sibling - link kernfs_node into sibling rbtree
  321. * @kn: kernfs_node of interest
  322. *
  323. * Link @kn into its sibling rbtree which starts from
  324. * @kn->parent->dir.children.
  325. *
  326. * Locking:
  327. * mutex_lock(kernfs_mutex)
  328. *
  329. * RETURNS:
  330. * 0 on susccess -EEXIST on failure.
  331. */
  332. static int kernfs_link_sibling(struct kernfs_node *kn)
  333. {
  334. struct rb_node **node = &kn->parent->dir.children.rb_node;
  335. struct rb_node *parent = NULL;
  336. while (*node) {
  337. struct kernfs_node *pos;
  338. int result;
  339. pos = rb_to_kn(*node);
  340. parent = *node;
  341. result = kernfs_sd_compare(kn, pos);
  342. if (result < 0)
  343. node = &pos->rb.rb_left;
  344. else if (result > 0)
  345. node = &pos->rb.rb_right;
  346. else
  347. return -EEXIST;
  348. }
  349. /* add new node and rebalance the tree */
  350. rb_link_node(&kn->rb, parent, node);
  351. rb_insert_color(&kn->rb, &kn->parent->dir.children);
  352. /* successfully added, account subdir number */
  353. if (kernfs_type(kn) == KERNFS_DIR)
  354. kn->parent->dir.subdirs++;
  355. return 0;
  356. }
/**
 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Try to unlink @kn from its sibling rbtree which starts from
 * kn->parent->dir.children. Returns %true if @kn was actually
 * removed, %false if @kn wasn't on the rbtree.
 *
 * Locking:
 * mutex_lock(kernfs_mutex)
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	/* RB_EMPTY_NODE means @kn was never linked or is already unlinked */
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;

	rb_erase(&kn->rb, &kn->parent->dir.children);
	/* mark as unlinked so a repeated call returns false */
	RB_CLEAR_NODE(&kn->rb);
	return true;
}
/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn. This function is noop if @kn
 * is NULL.
 *
 * RETURNS:
 * Pointer to @kn on success, NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	/* fails once the node has been deactivated (count biased negative) */
	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	/* annotate as a read lock so lockdep can catch removal deadlocks */
	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}
  398. /**
  399. * kernfs_put_active - put an active reference to kernfs_node
  400. * @kn: kernfs_node to put an active reference to
  401. *
  402. * Put an active reference to @kn. This function is noop if @kn
  403. * is NULL.
  404. */
  405. void kernfs_put_active(struct kernfs_node *kn)
  406. {
  407. struct kernfs_root *root = kernfs_root(kn);
  408. int v;
  409. if (unlikely(!kn))
  410. return;
  411. if (kernfs_lockdep(kn))
  412. rwsem_release(&kn->dep_map, 1, _RET_IP_);
  413. v = atomic_dec_return(&kn->active);
  414. if (likely(v != KN_DEACTIVATED_BIAS))
  415. return;
  416. wake_up_all(&root->deactivate_waitq);
  417. }
/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 *
 * Temporarily drops kernfs_mutex while sleeping and re-acquires it
 * before returning.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	/* the node must already be deactivated before draining */
	WARN_ON_ONCE(kernfs_active(kn));

	/* can't sleep for the waitqueue below while holding the mutex */
	mutex_unlock(&kernfs_mutex);

	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}

	kernfs_unmap_bin_file(kn);

	mutex_lock(&kernfs_mutex);
}
  448. /**
  449. * kernfs_get - get a reference count on a kernfs_node
  450. * @kn: the target kernfs_node
  451. */
  452. void kernfs_get(struct kernfs_node *kn)
  453. {
  454. if (kn) {
  455. WARN_ON(!atomic_read(&kn->count));
  456. atomic_inc(&kn->count);
  457. }
  458. }
  459. EXPORT_SYMBOL_GPL(kernfs_get);
/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 * Dropping the last reference also drops the reference the node held
 * on its parent, which may cascade up to and free the root.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	/* freeing a node that still has active users indicates a refcount bug */
	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	/* a symlink pins its target; release that pin */
	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		if (kn->iattr->ia_secdata)
			security_release_secctx(kn->iattr->ia_secdata,
						kn->iattr->ia_secdata_len);
		simple_xattrs_free(&kn->iattr->xattrs);
	}
	kfree(kn->iattr);
	ida_simple_remove(&root->ino_ida, kn->ino);
	kmem_cache_free(kernfs_node_cache, kn);

	/* drop the ref this node held on its parent; may cascade upward */
	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		ida_destroy(&root->ino_ida);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
/*
 * Dentry revalidation: returns 1 if the dentry still matches its backing
 * kernfs_node, 0 to force a fresh lookup.  Cannot run under RCU walk
 * because it takes kernfs_mutex.
 */
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Always perform fresh lookup for negatives */
	if (d_really_is_negative(dentry))
		goto out_bad_unlocked;

	kn = dentry->d_fsdata;
	mutex_lock(&kernfs_mutex);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (dentry->d_parent->d_fsdata != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	mutex_unlock(&kernfs_mutex);
	return 1;
out_bad:
	mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
	return 0;
}
/* Drop the reference the dentry holds on its backing kernfs_node. */
static void kernfs_dop_release(struct dentry *dentry)
{
	kernfs_put(dentry->d_fsdata);
}

const struct dentry_operations kernfs_dops = {
	.d_revalidate = kernfs_dop_revalidate,
	.d_release = kernfs_dop_release,
};
  543. /**
  544. * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
  545. * @dentry: the dentry in question
  546. *
  547. * Return the kernfs_node associated with @dentry. If @dentry is not a
  548. * kernfs one, %NULL is returned.
  549. *
  550. * While the returned kernfs_node will stay accessible as long as @dentry
  551. * is accessible, the returned node can be in any state and the caller is
  552. * fully responsible for determining what's accessible.
  553. */
  554. struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
  555. {
  556. if (dentry->d_sb->s_op == &kernfs_sops)
  557. return dentry->d_fsdata;
  558. return NULL;
  559. }
/*
 * Allocate and initialize a bare kernfs_node on @root with @name, @mode
 * and @flags.  The node starts deactivated and with a single reference.
 * Returns NULL on allocation failure; the caller sets ->parent.
 */
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	struct kernfs_node *kn;
	int ret;

	/* dup the name first so literals can be shared via kstrdup_const() */
	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	/* inode numbers start at 1; 0 is reserved */
	ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	/* born deactivated; kernfs_activate() makes it visible */
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree_const(name);
	return NULL;
}
  589. struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
  590. const char *name, umode_t mode,
  591. unsigned flags)
  592. {
  593. struct kernfs_node *kn;
  594. kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
  595. if (kn) {
  596. kernfs_get(parent);
  597. kn->parent = parent;
  598. }
  599. return kn;
  600. }
/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent. This
 * function increments nlink of the parent's inode if @kn is a
 * directory and link into the children list of the parent.
 *
 * RETURNS:
 * 0 on success, -EEXIST if entry with the given name already
 * exists.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	/* a ns tag must be supplied iff the parent has namespaces enabled */
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

	/* refuse to add under a parent that was activated and then removed */
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
		ktime_get_real_ts(&ps_iattrs->ia_ctime);
		ps_iattrs->ia_mtime = ps_iattrs->ia_ctime;
	}

	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate(). A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
  658. /**
  659. * kernfs_find_ns - find kernfs_node with the given name
  660. * @parent: kernfs_node to search under
  661. * @name: name to look for
  662. * @ns: the namespace tag to use
  663. *
  664. * Look for kernfs_node with name @name under @parent. Returns pointer to
  665. * the found kernfs_node on success, %NULL on failure.
  666. */
  667. static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
  668. const unsigned char *name,
  669. const void *ns)
  670. {
  671. struct rb_node *node = parent->dir.children.rb_node;
  672. bool has_ns = kernfs_ns_enabled(parent);
  673. unsigned int hash;
  674. lockdep_assert_held(&kernfs_mutex);
  675. if (has_ns != (bool)ns) {
  676. WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
  677. has_ns ? "required" : "invalid", parent->name, name);
  678. return NULL;
  679. }
  680. hash = kernfs_name_hash(name, ns);
  681. while (node) {
  682. struct kernfs_node *kn;
  683. int result;
  684. kn = rb_to_kn(node);
  685. result = kernfs_name_compare(hash, name, ns, kn);
  686. if (result < 0)
  687. node = node->rb_left;
  688. else if (result > 0)
  689. node = node->rb_right;
  690. else
  691. return kn;
  692. }
  693. return NULL;
  694. }
/*
 * Walk a '/'-separated @path down from @parent, one kernfs_find_ns() per
 * component.  Returns the final node, or NULL if the path doesn't fit in
 * the shared buffer or any component is missing.
 */
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held(&kernfs_mutex);

	/* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
	spin_lock_irq(&kernfs_rename_lock);

	/* strsep() below mutates the string, so work on the private copy */
	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
		spin_unlock_irq(&kernfs_rename_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	/* empty components (leading/doubled '/') are skipped */
	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_rename_lock);

	return parent;
}
  718. /**
  719. * kernfs_find_and_get_ns - find and get kernfs_node with the given name
  720. * @parent: kernfs_node to search under
  721. * @name: name to look for
  722. * @ns: the namespace tag to use
  723. *
  724. * Look for kernfs_node with name @name under @parent and get a reference
  725. * if found. This function may sleep and returns pointer to the found
  726. * kernfs_node on success, %NULL on failure.
  727. */
  728. struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
  729. const char *name, const void *ns)
  730. {
  731. struct kernfs_node *kn;
  732. mutex_lock(&kernfs_mutex);
  733. kn = kernfs_find_ns(parent, name, ns);
  734. kernfs_get(kn);
  735. mutex_unlock(&kernfs_mutex);
  736. return kn;
  737. }
  738. EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
  739. /**
  740. * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
  741. * @parent: kernfs_node to search under
  742. * @path: path to look for
  743. * @ns: the namespace tag to use
  744. *
  745. * Look for kernfs_node with path @path under @parent and get a reference
  746. * if found. This function may sleep and returns pointer to the found
  747. * kernfs_node on success, %NULL on failure.
  748. */
  749. struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
  750. const char *path, const void *ns)
  751. {
  752. struct kernfs_node *kn;
  753. mutex_lock(&kernfs_mutex);
  754. kn = kernfs_walk_ns(parent, path, ns);
  755. kernfs_get(kn);
  756. mutex_unlock(&kernfs_mutex);
  757. return kn;
  758. }
/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ida_init(&root->ino_ida);
	INIT_LIST_HEAD(&root->supers);

	/* the root node has an empty name and no parent */
	kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       KERNFS_DIR);
	if (!kn) {
		ida_destroy(&root->ino_ida);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	/* mirror of the activation policy applied in kernfs_add_one() */
	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}
  795. /**
  796. * kernfs_destroy_root - destroy a kernfs hierarchy
  797. * @root: root of the hierarchy to destroy
  798. *
  799. * Destroy the hierarchy anchored at @root by removing all existing
  800. * directories and destroying @root.
  801. */
  802. void kernfs_destroy_root(struct kernfs_root *root)
  803. {
  804. kernfs_remove(root->kn); /* will also free @root */
  805. }
  806. /**
  807. * kernfs_create_dir_ns - create a directory
  808. * @parent: parent in which to create a new directory
  809. * @name: name of the new directory
  810. * @mode: mode of the new directory
  811. * @priv: opaque data associated with the new directory
  812. * @ns: optional namespace tag of the directory
  813. *
  814. * Returns the created node on success, ERR_PTR() value on failure.
  815. */
  816. struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
  817. const char *name, umode_t mode,
  818. void *priv, const void *ns)
  819. {
  820. struct kernfs_node *kn;
  821. int rc;
  822. /* allocate */
  823. kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
  824. if (!kn)
  825. return ERR_PTR(-ENOMEM);
  826. kn->dir.root = parent->dir.root;
  827. kn->ns = ns;
  828. kn->priv = priv;
  829. /* link in */
  830. rc = kernfs_add_one(kn);
  831. if (!rc)
  832. return kn;
  833. kernfs_put(kn);
  834. return ERR_PTR(rc);
  835. }
  836. /**
  837. * kernfs_create_empty_dir - create an always empty directory
  838. * @parent: parent in which to create a new directory
  839. * @name: name of the new directory
  840. *
  841. * Returns the created node on success, ERR_PTR() value on failure.
  842. */
  843. struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
  844. const char *name)
  845. {
  846. struct kernfs_node *kn;
  847. int rc;
  848. /* allocate */
  849. kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
  850. if (!kn)
  851. return ERR_PTR(-ENOMEM);
  852. kn->flags |= KERNFS_EMPTY_DIR;
  853. kn->dir.root = parent->dir.root;
  854. kn->ns = NULL;
  855. kn->priv = NULL;
  856. /* link in */
  857. rc = kernfs_add_one(kn);
  858. if (!rc)
  859. return kn;
  860. kernfs_put(kn);
  861. return ERR_PTR(rc);
  862. }
/*
 * VFS ->lookup for kernfs directories: resolve @dentry's name under its
 * parent's kernfs_node and attach the matching inode, honoring the
 * superblock's namespace tag when namespaces are enabled.
 */
static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct dentry *ret;
	struct kernfs_node *parent = dentry->d_parent->d_fsdata;
	struct kernfs_node *kn;
	struct inode *inode;
	const void *ns = NULL;

	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);

	/* no such entry -- inactive (being removed) nodes are invisible too */
	if (!kn || !kernfs_active(kn)) {
		ret = NULL;
		goto out_unlock;
	}
	/* the dentry holds a reference on the node via ->d_fsdata */
	kernfs_get(kn);
	dentry->d_fsdata = kn;

	/* attach dentry and inode */
	inode = kernfs_get_inode(dir->i_sb, kn);
	if (!inode) {
		ret = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	/* instantiate and hash dentry */
	ret = d_splice_alias(inode, dentry);
 out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
  895. static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
  896. umode_t mode)
  897. {
  898. struct kernfs_node *parent = dir->i_private;
  899. struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
  900. int ret;
  901. if (!scops || !scops->mkdir)
  902. return -EPERM;
  903. if (!kernfs_get_active(parent))
  904. return -ENODEV;
  905. ret = scops->mkdir(parent, dentry->d_name.name, mode);
  906. kernfs_put_active(parent);
  907. return ret;
  908. }
  909. static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
  910. {
  911. struct kernfs_node *kn = dentry->d_fsdata;
  912. struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
  913. int ret;
  914. if (!scops || !scops->rmdir)
  915. return -EPERM;
  916. if (!kernfs_get_active(kn))
  917. return -ENODEV;
  918. ret = scops->rmdir(kn);
  919. kernfs_put_active(kn);
  920. return ret;
  921. }
  922. static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
  923. struct inode *new_dir, struct dentry *new_dentry)
  924. {
  925. struct kernfs_node *kn = old_dentry->d_fsdata;
  926. struct kernfs_node *new_parent = new_dir->i_private;
  927. struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
  928. int ret;
  929. if (!scops || !scops->rename)
  930. return -EPERM;
  931. if (!kernfs_get_active(kn))
  932. return -ENODEV;
  933. if (!kernfs_get_active(new_parent)) {
  934. kernfs_put_active(kn);
  935. return -ENODEV;
  936. }
  937. ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
  938. kernfs_put_active(new_parent);
  939. kernfs_put_active(kn);
  940. return ret;
  941. }
/* inode operations for kernfs directories */
const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.setxattr	= kernfs_iop_setxattr,
	.removexattr	= kernfs_iop_removexattr,
	.getxattr	= kernfs_iop_getxattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};
  955. static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
  956. {
  957. struct kernfs_node *last;
  958. while (true) {
  959. struct rb_node *rbn;
  960. last = pos;
  961. if (kernfs_type(pos) != KERNFS_DIR)
  962. break;
  963. rbn = rb_first(&pos->dir.children);
  964. if (!rbn)
  965. break;
  966. pos = rb_to_kn(rbn);
  967. }
  968. return last;
  969. }
  970. /**
  971. * kernfs_next_descendant_post - find the next descendant for post-order walk
  972. * @pos: the current position (%NULL to initiate traversal)
  973. * @root: kernfs_node whose descendants to walk
  974. *
  975. * Find the next descendant to visit for post-order traversal of @root's
  976. * descendants. @root is included in the iteration and the last node to be
  977. * visited.
  978. */
  979. static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
  980. struct kernfs_node *root)
  981. {
  982. struct rb_node *rbn;
  983. lockdep_assert_held(&kernfs_mutex);
  984. /* if first iteration, visit leftmost descendant which may be root */
  985. if (!pos)
  986. return kernfs_leftmost_descendant(root);
  987. /* if we visited @root, we're done */
  988. if (pos == root)
  989. return NULL;
  990. /* if there's an unvisited sibling, visit its leftmost descendant */
  991. rbn = rb_next(&pos->rb);
  992. if (rbn)
  993. return kernfs_leftmost_descendant(rb_to_kn(rbn));
  994. /* no sibling left, visit parent */
  995. return pos->parent;
  996. }
  997. /**
  998. * kernfs_activate - activate a node which started deactivated
  999. * @kn: kernfs_node whose subtree is to be activated
  1000. *
  1001. * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
  1002. * needs to be explicitly activated. A node which hasn't been activated
  1003. * isn't visible to userland and deactivation is skipped during its
  1004. * removal. This is useful to construct atomic init sequences where
  1005. * creation of multiple nodes should either succeed or fail atomically.
  1006. *
  1007. * The caller is responsible for ensuring that this function is not called
  1008. * after kernfs_remove*() is invoked on @kn.
  1009. */
  1010. void kernfs_activate(struct kernfs_node *kn)
  1011. {
  1012. struct kernfs_node *pos;
  1013. mutex_lock(&kernfs_mutex);
  1014. pos = NULL;
  1015. while ((pos = kernfs_next_descendant_post(pos, kn))) {
  1016. if (!pos || (pos->flags & KERNFS_ACTIVATED))
  1017. continue;
  1018. WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
  1019. WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
  1020. atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
  1021. pos->flags |= KERNFS_ACTIVATED;
  1022. }
  1023. mutex_unlock(&kernfs_mutex);
  1024. }
/*
 * Remove @kn and its entire subtree.  Caller must hold kernfs_mutex;
 * note that kernfs_drain() may drop it temporarily (see below).
 */
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		/* always take the current leftmost leaf first */
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated.  This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts(&ps_iattr->ia_iattr.ia_ctime);
				ps_iattr->ia_iattr.ia_mtime =
					ps_iattr->ia_iattr.ia_ctime;
			}

			/* drop the base reference on behalf of the unlinker */
			kernfs_put(pos);
		}

		/* drop the temporary reference taken above */
		kernfs_put(pos);
	} while (pos != kn);
}
  1080. /**
  1081. * kernfs_remove - remove a kernfs_node recursively
  1082. * @kn: the kernfs_node to remove
  1083. *
  1084. * Remove @kn along with all its subdirectories and files.
  1085. */
  1086. void kernfs_remove(struct kernfs_node *kn)
  1087. {
  1088. mutex_lock(&kernfs_mutex);
  1089. __kernfs_remove(kn);
  1090. mutex_unlock(&kernfs_mutex);
  1091. }
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already or be in the process of
 * being removed.  Once kernfs_break_active_protection() is invoked, that
 * protection is irreversibly gone for the kernfs operation instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	/* re-acquire the lockdep annotation dropped by kernfs_put_active() */
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	/* drop our own active ref so deactivation can complete */
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex.  The ones which lost arbitration
	 * wait for SUICIDED && drained which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		/* we won the arbitration - do the removal ourselves */
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		/* lost the arbitration - sleep until the winner is done */
		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			/* drop the mutex across the sleep to let the winner run */
			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}
  1213. /**
  1214. * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
  1215. * @parent: parent of the target
  1216. * @name: name of the kernfs_node to remove
  1217. * @ns: namespace tag of the kernfs_node to remove
  1218. *
  1219. * Look for the kernfs_node with @name and @ns under @parent and remove it.
  1220. * Returns 0 on success, -ENOENT if such entry doesn't exist.
  1221. */
  1222. int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
  1223. const void *ns)
  1224. {
  1225. struct kernfs_node *kn;
  1226. if (!parent) {
  1227. WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
  1228. name);
  1229. return -ENOENT;
  1230. }
  1231. mutex_lock(&kernfs_mutex);
  1232. kn = kernfs_find_ns(parent, name, ns);
  1233. if (kn)
  1234. __kernfs_remove(kn);
  1235. mutex_unlock(&kernfs_mutex);
  1236. if (kn)
  1237. return 0;
  1238. else
  1239. return -ENOENT;
  1240. }
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success (including the no-op case where nothing changed),
 * -EINVAL for the root node, -ENOENT if @kn or @new_parent is inactive or
 * @new_parent is an always-empty directory, -EEXIST if the target name
 * already exists, -ENOMEM on allocation failure.
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		/* dup before taking rename_lock; freed as old_name below */
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		/* name unchanged; NULL means "keep kn->name" below */
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	/* hash depends on both name and ns; recompute before relinking */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}
  1302. /* Relationship between s_mode and the DT_xxx types */
  1303. static inline unsigned char dt_type(struct kernfs_node *kn)
  1304. {
  1305. return (kn->mode >> 12) & 15;
  1306. }
/* release the readdir cursor reference stashed in ->private_data, if any */
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}
/*
 * Find the node to resume a readdir from.  @pos is the cached cursor
 * (may be NULL) whose reference this function consumes, and @hash is the
 * f_pos cookie, which kernfs sets to the emitted node's ->hash (see
 * kernfs_fop_readdir()).  If the cached cursor is no longer usable, fall
 * back to searching @parent's children rbtree by @hash.  Returns the next
 * node to emit, or NULL when the directory is exhausted.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		/*
		 * The cursor is only valid if the node is still active,
		 * still under @parent and its hash still matches the
		 * f_pos cookie (i.e. it wasn't renamed/rehashed).
		 */
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	/*
	 * No valid cursor: search by hash.  Values 0 and 1 are presumably
	 * reserved for "." and ".." and INT_MAX marks EOF - see
	 * kernfs_fop_readdir(); only search within that window.
	 */
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}
  1344. static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
  1345. struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
  1346. {
  1347. pos = kernfs_dir_pos(ns, parent, ino, pos);
  1348. if (pos) {
  1349. do {
  1350. struct rb_node *node = rb_next(&pos->rb);
  1351. if (!node)
  1352. pos = NULL;
  1353. else
  1354. pos = rb_to_kn(node);
  1355. } while (pos && (!kernfs_active(pos) || pos->ns != ns));
  1356. }
  1357. return pos;
  1358. }
/*
 * VFS ->iterate for kernfs directories.  ctx->pos is the current node's
 * name hash (INT_MAX at EOF) and ->private_data caches a reference to the
 * node last emitted, so iteration can resume even though kernfs_mutex is
 * dropped around each dir_emit() call.
 */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = dentry->d_fsdata;
	struct kernfs_node *pos = file->private_data;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = pos->ino;

		ctx->pos = pos->hash;
		file->private_data = pos;
		/* pin @pos - dir_emit() may sleep and the node could die */
		kernfs_get(pos);

		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;	/* cursor ref kept for the next call */
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;	/* mark EOF */
	return 0;
}
/* file operations for kernfs directories */
const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};