dir.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664
  1. /*
  2. * fs/kernfs/dir.c - kernfs directory implementation
  3. *
  4. * Copyright (c) 2001-3 Patrick Mochel
  5. * Copyright (c) 2007 SUSE Linux Products GmbH
  6. * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
  7. *
  8. * This file is released under the GPLv2.
  9. */
  10. #include <linux/sched.h>
  11. #include <linux/fs.h>
  12. #include <linux/namei.h>
  13. #include <linux/idr.h>
  14. #include <linux/slab.h>
  15. #include <linux/security.h>
  16. #include <linux/hash.h>
  17. #include "kernfs-internal.h"
/* Serializes all kernfs hierarchy modifications and active-ref state. */
DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by rename_lock */

/* Convert an embedded rb_node back to its containing kernfs_node. */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
/*
 * kernfs_active - test whether @kn is active
 *
 * A node is active while its ->active count is non-negative; deactivation
 * biases the count negative (KN_DEACTIVATED_BIAS).  Caller must hold
 * kernfs_mutex.
 */
static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_mutex);
	return atomic_read(&kn->active) >= 0;
}
/*
 * kernfs_lockdep - whether lockdep annotations apply to @kn
 *
 * Only nodes flagged KERNFS_LOCKDEP participate in active-ref lockdep
 * tracking, and only when lockdep itself is compiled in.
 */
static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}
/* Copy @kn's name into @buf; the root node (no parent) is rendered as "/".
 * Caller must hold kernfs_rename_lock.  Returns strlcpy() semantics. */
static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
}
  39. /* kernfs_node_depth - compute depth from @from to @to */
  40. static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
  41. {
  42. size_t depth = 0;
  43. while (to->parent && to != from) {
  44. depth++;
  45. to = to->parent;
  46. }
  47. return depth;
  48. }
  49. static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
  50. struct kernfs_node *b)
  51. {
  52. size_t da, db;
  53. struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
  54. if (ra != rb)
  55. return NULL;
  56. da = kernfs_depth(ra->kn, a);
  57. db = kernfs_depth(rb->kn, b);
  58. while (da > db) {
  59. a = a->parent;
  60. da--;
  61. }
  62. while (db > da) {
  63. b = b->parent;
  64. db--;
  65. }
  66. /* worst case b and a will be the same at root */
  67. while (b != a) {
  68. b = b->parent;
  69. a = a->parent;
  70. }
  71. return a;
  72. }
  73. /**
  74. * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
  75. * where kn_from is treated as root of the path.
  76. * @kn_from: kernfs node which should be treated as root for the path
  77. * @kn_to: kernfs node to which path is needed
  78. * @buf: buffer to copy the path into
  79. * @buflen: size of @buf
  80. *
  81. * We need to handle couple of scenarios here:
  82. * [1] when @kn_from is an ancestor of @kn_to at some level
  83. * kn_from: /n1/n2/n3
  84. * kn_to: /n1/n2/n3/n4/n5
  85. * result: /n4/n5
  86. *
  87. * [2] when @kn_from is on a different hierarchy and we need to find common
  88. * ancestor between @kn_from and @kn_to.
  89. * kn_from: /n1/n2/n3/n4
  90. * kn_to: /n1/n2/n5
  91. * result: /../../n5
  92. * OR
  93. * kn_from: /n1/n2/n3/n4/n5 [depth=5]
  94. * kn_to: /n1/n2/n3 [depth=3]
  95. * result: /../..
  96. *
  97. * return value: length of the string. If greater than buflen,
  98. * then contents of buf are undefined. On error, -1 is returned.
  99. */
  100. static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
  101. struct kernfs_node *kn_from,
  102. char *buf, size_t buflen)
  103. {
  104. struct kernfs_node *kn, *common;
  105. const char parent_str[] = "/..";
  106. size_t depth_from, depth_to, len = 0, nlen = 0;
  107. char *p;
  108. int i;
  109. if (!kn_from)
  110. kn_from = kernfs_root(kn_to)->kn;
  111. if (kn_from == kn_to)
  112. return strlcpy(buf, "/", buflen);
  113. common = kernfs_common_ancestor(kn_from, kn_to);
  114. if (WARN_ON(!common))
  115. return -1;
  116. depth_to = kernfs_depth(common, kn_to);
  117. depth_from = kernfs_depth(common, kn_from);
  118. if (buf)
  119. buf[0] = '\0';
  120. for (i = 0; i < depth_from; i++)
  121. len += strlcpy(buf + len, parent_str,
  122. len < buflen ? buflen - len : 0);
  123. /* Calculate how many bytes we need for the rest */
  124. for (kn = kn_to; kn != common; kn = kn->parent)
  125. nlen += strlen(kn->name) + 1;
  126. if (len + nlen >= buflen)
  127. return len + nlen;
  128. p = buf + len + nlen;
  129. *p = '\0';
  130. for (kn = kn_to; kn != common; kn = kn->parent) {
  131. size_t tmp = strlen(kn->name);
  132. p -= tmp;
  133. memcpy(p, kn->name, tmp);
  134. *(--p) = '/';
  135. }
  136. return len + nlen;
  137. }
  138. /**
  139. * kernfs_name - obtain the name of a given node
  140. * @kn: kernfs_node of interest
  141. * @buf: buffer to copy @kn's name into
  142. * @buflen: size of @buf
  143. *
  144. * Copies the name of @kn into @buf of @buflen bytes. The behavior is
  145. * similar to strlcpy(). It returns the length of @kn's name and if @buf
  146. * isn't long enough, it's filled upto @buflen-1 and nul terminated.
  147. *
  148. * This function can be called from any context.
  149. */
  150. int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
  151. {
  152. unsigned long flags;
  153. int ret;
  154. spin_lock_irqsave(&kernfs_rename_lock, flags);
  155. ret = kernfs_name_locked(kn, buf, buflen);
  156. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  157. return ret;
  158. }
/**
 * kernfs_path_len - determine the length of the full path of a given node
 * @kn: kernfs_node of interest
 *
 * The returned length doesn't include the space for the terminating '\0'.
 */
size_t kernfs_path_len(struct kernfs_node *kn)
{
	size_t len = 0;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	/*
	 * Sum "/<name>" for every node from @kn up to, but excluding, the
	 * root.  The do-while guarantees @kn itself is counted even when
	 * it is the root node.
	 */
	do {
		len += strlen(kn->name) + 1;
		kn = kn->parent;
	} while (kn && kn->parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return len;
}
  177. /**
  178. * kernfs_path_from_node - build path of node @to relative to @from.
  179. * @from: parent kernfs_node relative to which we need to build the path
  180. * @to: kernfs_node of interest
  181. * @buf: buffer to copy @to's path into
  182. * @buflen: size of @buf
  183. *
  184. * Builds @to's path relative to @from in @buf. @from and @to must
  185. * be on the same kernfs-root. If @from is not parent of @to, then a relative
  186. * path (which includes '..'s) as needed to reach from @from to @to is
  187. * returned.
  188. *
  189. * If @buf isn't long enough, the return value will be greater than @buflen
  190. * and @buf contents are undefined.
  191. */
  192. int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
  193. char *buf, size_t buflen)
  194. {
  195. unsigned long flags;
  196. int ret;
  197. spin_lock_irqsave(&kernfs_rename_lock, flags);
  198. ret = kernfs_path_from_node_locked(to, from, buf, buflen);
  199. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  200. return ret;
  201. }
  202. EXPORT_SYMBOL_GPL(kernfs_path_from_node);
  203. /**
  204. * kernfs_path - build full path of a given node
  205. * @kn: kernfs_node of interest
  206. * @buf: buffer to copy @kn's name into
  207. * @buflen: size of @buf
  208. *
  209. * Builds and returns the full path of @kn in @buf of @buflen bytes. The
  210. * path is built from the end of @buf so the returned pointer usually
  211. * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
  212. * and %NULL is returned.
  213. */
  214. char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
  215. {
  216. int ret;
  217. ret = kernfs_path_from_node(kn, NULL, buf, buflen);
  218. if (ret < 0 || ret >= buflen)
  219. return NULL;
  220. return buf;
  221. }
  222. EXPORT_SYMBOL_GPL(kernfs_path);
  223. /**
  224. * pr_cont_kernfs_name - pr_cont name of a kernfs_node
  225. * @kn: kernfs_node of interest
  226. *
  227. * This function can be called from any context.
  228. */
  229. void pr_cont_kernfs_name(struct kernfs_node *kn)
  230. {
  231. unsigned long flags;
  232. spin_lock_irqsave(&kernfs_rename_lock, flags);
  233. kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
  234. pr_cont("%s", kernfs_pr_cont_buf);
  235. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  236. }
  237. /**
  238. * pr_cont_kernfs_path - pr_cont path of a kernfs_node
  239. * @kn: kernfs_node of interest
  240. *
  241. * This function can be called from any context.
  242. */
  243. void pr_cont_kernfs_path(struct kernfs_node *kn)
  244. {
  245. unsigned long flags;
  246. int sz;
  247. spin_lock_irqsave(&kernfs_rename_lock, flags);
  248. sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
  249. sizeof(kernfs_pr_cont_buf));
  250. if (sz < 0) {
  251. pr_cont("(error)");
  252. goto out;
  253. }
  254. if (sz >= sizeof(kernfs_pr_cont_buf)) {
  255. pr_cont("(name too long)");
  256. goto out;
  257. }
  258. pr_cont("%s", kernfs_pr_cont_buf);
  259. out:
  260. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  261. }
  262. /**
  263. * kernfs_get_parent - determine the parent node and pin it
  264. * @kn: kernfs_node of interest
  265. *
  266. * Determines @kn's parent, pins and returns it. This function can be
  267. * called from any context.
  268. */
  269. struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
  270. {
  271. struct kernfs_node *parent;
  272. unsigned long flags;
  273. spin_lock_irqsave(&kernfs_rename_lock, flags);
  274. parent = kn->parent;
  275. kernfs_get(parent);
  276. spin_unlock_irqrestore(&kernfs_rename_lock, flags);
  277. return parent;
  278. }
  279. /**
  280. * kernfs_name_hash
  281. * @name: Null terminated string to hash
  282. * @ns: Namespace tag to hash
  283. *
  284. * Returns 31 bit hash of ns + name (so it fits in an off_t )
  285. */
  286. static unsigned int kernfs_name_hash(const char *name, const void *ns)
  287. {
  288. unsigned long hash = init_name_hash();
  289. unsigned int len = strlen(name);
  290. while (len--)
  291. hash = partial_name_hash(*name++, hash);
  292. hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
  293. hash &= 0x7fffffffU;
  294. /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
  295. if (hash < 2)
  296. hash += 2;
  297. if (hash >= INT_MAX)
  298. hash = INT_MAX - 1;
  299. return hash;
  300. }
  301. static int kernfs_name_compare(unsigned int hash, const char *name,
  302. const void *ns, const struct kernfs_node *kn)
  303. {
  304. if (hash < kn->hash)
  305. return -1;
  306. if (hash > kn->hash)
  307. return 1;
  308. if (ns < kn->ns)
  309. return -1;
  310. if (ns > kn->ns)
  311. return 1;
  312. return strcmp(name, kn->name);
  313. }
/* rbtree ordering helper: compare @left against @right using the
 * (hash, ns, name) key of @left. */
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}
/**
 * kernfs_link_sibling - link kernfs_node into sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Link @kn into its sibling rbtree which starts from
 * @kn->parent->dir.children.
 *
 * Locking:
 * mutex_lock(kernfs_mutex)
 *
 * RETURNS:
 * 0 on success, -EEXIST on failure.
 */
static int kernfs_link_sibling(struct kernfs_node *kn)
{
	struct rb_node **node = &kn->parent->dir.children.rb_node;
	struct rb_node *parent = NULL;

	/* standard rbtree descent to find the insertion point */
	while (*node) {
		struct kernfs_node *pos;
		int result;

		pos = rb_to_kn(*node);
		parent = *node;
		result = kernfs_sd_compare(kn, pos);
		if (result < 0)
			node = &pos->rb.rb_left;
		else if (result > 0)
			node = &pos->rb.rb_right;
		else
			return -EEXIST;	/* duplicate (hash, ns, name) */
	}

	/* add new node and rebalance the tree */
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);

	/* successfully added, account subdir number */
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;

	return 0;
}
  357. /**
  358. * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
  359. * @kn: kernfs_node of interest
  360. *
  361. * Try to unlink @kn from its sibling rbtree which starts from
  362. * kn->parent->dir.children. Returns %true if @kn was actually
  363. * removed, %false if @kn wasn't on the rbtree.
  364. *
  365. * Locking:
  366. * mutex_lock(kernfs_mutex)
  367. */
  368. static bool kernfs_unlink_sibling(struct kernfs_node *kn)
  369. {
  370. if (RB_EMPTY_NODE(&kn->rb))
  371. return false;
  372. if (kernfs_type(kn) == KERNFS_DIR)
  373. kn->parent->dir.subdirs--;
  374. rb_erase(&kn->rb, &kn->parent->dir.children);
  375. RB_CLEAR_NODE(&kn->rb);
  376. return true;
  377. }
/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn. This function is noop if @kn
 * is NULL.
 *
 * RETURNS:
 * Pointer to @kn on success, NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	/* fails iff the node has been deactivated (count is negative) */
	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	/* annotate as a read-lock acquisition for lockdep */
	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}
  398. /**
  399. * kernfs_put_active - put an active reference to kernfs_node
  400. * @kn: kernfs_node to put an active reference to
  401. *
  402. * Put an active reference to @kn. This function is noop if @kn
  403. * is NULL.
  404. */
  405. void kernfs_put_active(struct kernfs_node *kn)
  406. {
  407. struct kernfs_root *root = kernfs_root(kn);
  408. int v;
  409. if (unlikely(!kn))
  410. return;
  411. if (kernfs_lockdep(kn))
  412. rwsem_release(&kn->dep_map, 1, _RET_IP_);
  413. v = atomic_dec_return(&kn->active);
  414. if (likely(v != KN_DEACTIVATED_BIAS))
  415. return;
  416. wake_up_all(&root->deactivate_waitq);
  417. }
/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	/* the node must already be deactivated before draining */
	WARN_ON_ONCE(kernfs_active(kn));

	/* drop the mutex while sleeping; reacquired before returning */
	mutex_unlock(&kernfs_mutex);

	/* annotate as a write-lock acquisition for lockdep */
	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}

	kernfs_unmap_bin_file(kn);

	mutex_lock(&kernfs_mutex);
}
  448. /**
  449. * kernfs_get - get a reference count on a kernfs_node
  450. * @kn: the target kernfs_node
  451. */
  452. void kernfs_get(struct kernfs_node *kn)
  453. {
  454. if (kn) {
  455. WARN_ON(!atomic_read(&kn->count));
  456. atomic_inc(&kn->count);
  457. }
  458. }
  459. EXPORT_SYMBOL_GPL(kernfs_get);
/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 * Releasing a node also drops the reference it held on its parent,
 * which may cascade all the way up to the root.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	/* a node must be fully deactivated before its last ref drops */
	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		if (kn->iattr->ia_secdata)
			security_release_secctx(kn->iattr->ia_secdata,
						kn->iattr->ia_secdata_len);
		simple_xattrs_free(&kn->iattr->xattrs);
	}
	kfree(kn->iattr);
	ida_simple_remove(&root->ino_ida, kn->ino);
	kmem_cache_free(kernfs_node_cache, kn);

	/* drop the ref this node held on its parent; iterate, don't recurse */
	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		ida_destroy(&root->ino_ida);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
/*
 * kernfs_dop_revalidate - check whether a cached dentry still matches its
 * kernfs_node.  Returns 1 if valid, 0 to force a fresh lookup, -ECHILD in
 * RCU-walk mode (we need to take kernfs_mutex, which may sleep).
 */
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Always perform fresh lookup for negatives */
	if (d_really_is_negative(dentry))
		goto out_bad_unlocked;

	kn = dentry->d_fsdata;
	mutex_lock(&kernfs_mutex);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (dentry->d_parent->d_fsdata != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	mutex_unlock(&kernfs_mutex);
	return 1;
out_bad:
	mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
	return 0;
}
  535. static void kernfs_dop_release(struct dentry *dentry)
  536. {
  537. kernfs_put(dentry->d_fsdata);
  538. }
/* dentry operations shared by all kernfs-backed superblocks */
const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
	.d_release	= kernfs_dop_release,
};
  543. /**
  544. * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
  545. * @dentry: the dentry in question
  546. *
  547. * Return the kernfs_node associated with @dentry. If @dentry is not a
  548. * kernfs one, %NULL is returned.
  549. *
  550. * While the returned kernfs_node will stay accessible as long as @dentry
  551. * is accessible, the returned node can be in any state and the caller is
  552. * fully responsible for determining what's accessible.
  553. */
  554. struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
  555. {
  556. if (dentry->d_sb->s_op == &kernfs_sops)
  557. return dentry->d_fsdata;
  558. return NULL;
  559. }
  560. static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
  561. const char *name, umode_t mode,
  562. unsigned flags)
  563. {
  564. struct kernfs_node *kn;
  565. int ret;
  566. name = kstrdup_const(name, GFP_KERNEL);
  567. if (!name)
  568. return NULL;
  569. kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
  570. if (!kn)
  571. goto err_out1;
  572. ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
  573. if (ret < 0)
  574. goto err_out2;
  575. kn->ino = ret;
  576. atomic_set(&kn->count, 1);
  577. atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
  578. RB_CLEAR_NODE(&kn->rb);
  579. kn->name = name;
  580. kn->mode = mode;
  581. kn->flags = flags;
  582. return kn;
  583. err_out2:
  584. kmem_cache_free(kernfs_node_cache, kn);
  585. err_out1:
  586. kfree_const(name);
  587. return NULL;
  588. }
  589. struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
  590. const char *name, umode_t mode,
  591. unsigned flags)
  592. {
  593. struct kernfs_node *kn;
  594. kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
  595. if (kn) {
  596. kernfs_get(parent);
  597. kn->parent = parent;
  598. }
  599. return kn;
  600. }
/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent. This
 * function increments nlink of the parent's inode if @kn is a
 * directory and link into the children list of the parent.
 *
 * RETURNS:
 * 0 on success, -EEXIST if entry with the given name already
 * exists, -EINVAL on namespace/type mismatch, -ENOENT if the
 * parent is an empty dir or already deactivated.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	/* namespace tag presence must match the parent's ns mode */
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

	/* no additions under a parent that was activated and then removed */
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
		ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
	}

	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate(). A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
  657. /**
  658. * kernfs_find_ns - find kernfs_node with the given name
  659. * @parent: kernfs_node to search under
  660. * @name: name to look for
  661. * @ns: the namespace tag to use
  662. *
  663. * Look for kernfs_node with name @name under @parent. Returns pointer to
  664. * the found kernfs_node on success, %NULL on failure.
  665. */
  666. static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
  667. const unsigned char *name,
  668. const void *ns)
  669. {
  670. struct rb_node *node = parent->dir.children.rb_node;
  671. bool has_ns = kernfs_ns_enabled(parent);
  672. unsigned int hash;
  673. lockdep_assert_held(&kernfs_mutex);
  674. if (has_ns != (bool)ns) {
  675. WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
  676. has_ns ? "required" : "invalid", parent->name, name);
  677. return NULL;
  678. }
  679. hash = kernfs_name_hash(name, ns);
  680. while (node) {
  681. struct kernfs_node *kn;
  682. int result;
  683. kn = rb_to_kn(node);
  684. result = kernfs_name_compare(hash, name, ns, kn);
  685. if (result < 0)
  686. node = node->rb_left;
  687. else if (result > 0)
  688. node = node->rb_right;
  689. else
  690. return kn;
  691. }
  692. return NULL;
  693. }
/*
 * kernfs_walk_ns - walk a '/'-separated @path down from @parent
 *
 * Resolves each non-empty path component with kernfs_find_ns() under the
 * same @ns tag.  Returns the final node, or NULL if any component is
 * missing or @path doesn't fit the scratch buffer.  Caller must hold
 * kernfs_mutex.
 */
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held(&kernfs_mutex);

	/* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
	spin_lock_irq(&kernfs_rename_lock);

	/* strsep() below mutates the string, so work on a copy */
	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
		spin_unlock_irq(&kernfs_rename_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	/* empty components (leading, trailing or doubled '/') are skipped */
	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_rename_lock);

	return parent;
}
  717. /**
  718. * kernfs_find_and_get_ns - find and get kernfs_node with the given name
  719. * @parent: kernfs_node to search under
  720. * @name: name to look for
  721. * @ns: the namespace tag to use
  722. *
  723. * Look for kernfs_node with name @name under @parent and get a reference
  724. * if found. This function may sleep and returns pointer to the found
  725. * kernfs_node on success, %NULL on failure.
  726. */
  727. struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
  728. const char *name, const void *ns)
  729. {
  730. struct kernfs_node *kn;
  731. mutex_lock(&kernfs_mutex);
  732. kn = kernfs_find_ns(parent, name, ns);
  733. kernfs_get(kn);
  734. mutex_unlock(&kernfs_mutex);
  735. return kn;
  736. }
  737. EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
  738. /**
  739. * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
  740. * @parent: kernfs_node to search under
  741. * @path: path to look for
  742. * @ns: the namespace tag to use
  743. *
  744. * Look for kernfs_node with path @path under @parent and get a reference
  745. * if found. This function may sleep and returns pointer to the found
  746. * kernfs_node on success, %NULL on failure.
  747. */
  748. struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
  749. const char *path, const void *ns)
  750. {
  751. struct kernfs_node *kn;
  752. mutex_lock(&kernfs_mutex);
  753. kn = kernfs_walk_ns(parent, path, ns);
  754. kernfs_get(kn);
  755. mutex_unlock(&kernfs_mutex);
  756. return kn;
  757. }
/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ida_init(&root->ino_ida);
	INIT_LIST_HEAD(&root->supers);

	/* the root node has the empty name and is a directory */
	kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       KERNFS_DIR);
	if (!kn) {
		ida_destroy(&root->ino_ida);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	/* mirror kernfs_add_one(): activate unless explicitly deferred */
	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}
/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	/* removing the root node recursively tears down the whole tree */
	kernfs_remove(root->kn);	/* will also free @root */
}
  805. /**
  806. * kernfs_create_dir_ns - create a directory
  807. * @parent: parent in which to create a new directory
  808. * @name: name of the new directory
  809. * @mode: mode of the new directory
  810. * @priv: opaque data associated with the new directory
  811. * @ns: optional namespace tag of the directory
  812. *
  813. * Returns the created node on success, ERR_PTR() value on failure.
  814. */
  815. struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
  816. const char *name, umode_t mode,
  817. void *priv, const void *ns)
  818. {
  819. struct kernfs_node *kn;
  820. int rc;
  821. /* allocate */
  822. kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
  823. if (!kn)
  824. return ERR_PTR(-ENOMEM);
  825. kn->dir.root = parent->dir.root;
  826. kn->ns = ns;
  827. kn->priv = priv;
  828. /* link in */
  829. rc = kernfs_add_one(kn);
  830. if (!rc)
  831. return kn;
  832. kernfs_put(kn);
  833. return ERR_PTR(rc);
  834. }
  835. /**
  836. * kernfs_create_empty_dir - create an always empty directory
  837. * @parent: parent in which to create a new directory
  838. * @name: name of the new directory
  839. *
  840. * Returns the created node on success, ERR_PTR() value on failure.
  841. */
  842. struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
  843. const char *name)
  844. {
  845. struct kernfs_node *kn;
  846. int rc;
  847. /* allocate */
  848. kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
  849. if (!kn)
  850. return ERR_PTR(-ENOMEM);
  851. kn->flags |= KERNFS_EMPTY_DIR;
  852. kn->dir.root = parent->dir.root;
  853. kn->ns = NULL;
  854. kn->priv = NULL;
  855. /* link in */
  856. rc = kernfs_add_one(kn);
  857. if (!rc)
  858. return kn;
  859. kernfs_put(kn);
  860. return ERR_PTR(rc);
  861. }
/*
 * ->lookup for kernfs directories.  Resolves @dentry under @dir by name,
 * honoring the superblock's namespace tag when enabled.  Returns NULL for
 * a negative lookup, ERR_PTR(-ENOMEM) on inode allocation failure, or the
 * result of d_splice_alias().
 */
static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct dentry *ret;
	struct kernfs_node *parent = dentry->d_parent->d_fsdata;
	struct kernfs_node *kn;
	struct inode *inode;
	const void *ns = NULL;

	mutex_lock(&kernfs_mutex);

	/* use the namespace tag this superblock was mounted with */
	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);

	/* no such entry, or the node is being deactivated/removed */
	if (!kn || !kernfs_active(kn)) {
		ret = NULL;
		goto out_unlock;
	}
	/* dentry holds a reference to the kernfs_node via ->d_fsdata */
	kernfs_get(kn);
	dentry->d_fsdata = kn;

	/* attach dentry and inode */
	inode = kernfs_get_inode(dir->i_sb, kn);
	if (!inode) {
		ret = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	/* instantiate and hash dentry */
	ret = d_splice_alias(inode, dentry);
 out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
  894. static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
  895. umode_t mode)
  896. {
  897. struct kernfs_node *parent = dir->i_private;
  898. struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
  899. int ret;
  900. if (!scops || !scops->mkdir)
  901. return -EPERM;
  902. if (!kernfs_get_active(parent))
  903. return -ENODEV;
  904. ret = scops->mkdir(parent, dentry->d_name.name, mode);
  905. kernfs_put_active(parent);
  906. return ret;
  907. }
  908. static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
  909. {
  910. struct kernfs_node *kn = dentry->d_fsdata;
  911. struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
  912. int ret;
  913. if (!scops || !scops->rmdir)
  914. return -EPERM;
  915. if (!kernfs_get_active(kn))
  916. return -ENODEV;
  917. ret = scops->rmdir(kn);
  918. kernfs_put_active(kn);
  919. return ret;
  920. }
  921. static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
  922. struct inode *new_dir, struct dentry *new_dentry)
  923. {
  924. struct kernfs_node *kn = old_dentry->d_fsdata;
  925. struct kernfs_node *new_parent = new_dir->i_private;
  926. struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
  927. int ret;
  928. if (!scops || !scops->rename)
  929. return -EPERM;
  930. if (!kernfs_get_active(kn))
  931. return -ENODEV;
  932. if (!kernfs_get_active(new_parent)) {
  933. kernfs_put_active(kn);
  934. return -ENODEV;
  935. }
  936. ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
  937. kernfs_put_active(new_parent);
  938. kernfs_put_active(kn);
  939. return ret;
  940. }
/* inode operations shared by all kernfs directories */
const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.setxattr	= kernfs_iop_setxattr,
	.removexattr	= kernfs_iop_removexattr,
	.getxattr	= kernfs_iop_getxattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};
  954. static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
  955. {
  956. struct kernfs_node *last;
  957. while (true) {
  958. struct rb_node *rbn;
  959. last = pos;
  960. if (kernfs_type(pos) != KERNFS_DIR)
  961. break;
  962. rbn = rb_first(&pos->dir.children);
  963. if (!rbn)
  964. break;
  965. pos = rb_to_kn(rbn);
  966. }
  967. return last;
  968. }
  969. /**
  970. * kernfs_next_descendant_post - find the next descendant for post-order walk
  971. * @pos: the current position (%NULL to initiate traversal)
  972. * @root: kernfs_node whose descendants to walk
  973. *
  974. * Find the next descendant to visit for post-order traversal of @root's
  975. * descendants. @root is included in the iteration and the last node to be
  976. * visited.
  977. */
  978. static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
  979. struct kernfs_node *root)
  980. {
  981. struct rb_node *rbn;
  982. lockdep_assert_held(&kernfs_mutex);
  983. /* if first iteration, visit leftmost descendant which may be root */
  984. if (!pos)
  985. return kernfs_leftmost_descendant(root);
  986. /* if we visited @root, we're done */
  987. if (pos == root)
  988. return NULL;
  989. /* if there's an unvisited sibling, visit its leftmost descendant */
  990. rbn = rb_next(&pos->rb);
  991. if (rbn)
  992. return kernfs_leftmost_descendant(rb_to_kn(rbn));
  993. /* no sibling left, visit parent */
  994. return pos->parent;
  995. }
  996. /**
  997. * kernfs_activate - activate a node which started deactivated
  998. * @kn: kernfs_node whose subtree is to be activated
  999. *
  1000. * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
  1001. * needs to be explicitly activated. A node which hasn't been activated
  1002. * isn't visible to userland and deactivation is skipped during its
  1003. * removal. This is useful to construct atomic init sequences where
  1004. * creation of multiple nodes should either succeed or fail atomically.
  1005. *
  1006. * The caller is responsible for ensuring that this function is not called
  1007. * after kernfs_remove*() is invoked on @kn.
  1008. */
  1009. void kernfs_activate(struct kernfs_node *kn)
  1010. {
  1011. struct kernfs_node *pos;
  1012. mutex_lock(&kernfs_mutex);
  1013. pos = NULL;
  1014. while ((pos = kernfs_next_descendant_post(pos, kn))) {
  1015. if (!pos || (pos->flags & KERNFS_ACTIVATED))
  1016. continue;
  1017. WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
  1018. WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
  1019. atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
  1020. pos->flags |= KERNFS_ACTIVATED;
  1021. }
  1022. mutex_unlock(&kernfs_mutex);
  1023. }
/*
 * Remove @kn and its whole subtree: deactivate every node, then drain and
 * unlink them leaf-first.  Caller must hold kernfs_mutex; note that
 * kernfs_drain() may drop and re-acquire it.
 */
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		/* always pick a leaf so children go before their parent */
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated.  This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
				ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
			}

			/* drop the base reference on behalf of the unlinker */
			kernfs_put(pos);
		}

		/* drop the temporary reference taken above */
		kernfs_put(pos);
	} while (pos != kn);
}
/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	/* __kernfs_remove() needs kernfs_mutex and tolerates NULL @kn */
	mutex_lock(&kernfs_mutex);
	__kernfs_remove(kn);
	mutex_unlock(&kernfs_mutex);
}
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already or be in the process of
 * being removed.  Once kernfs_break_active_protection() is invoked, that
 * protection is irreversibly gone for the kernfs operation instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	/* tell lockdep the active ref is considered held again */
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	/* drop our own active ref so removal can't deadlock on us */
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex.  The ones which lost arbitration
	 * wait for SUICIDED && drained which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		/* we won: mark, remove and report success */
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		/* we lost: sleep until the winner's removal is fully done */
		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			/* drop the mutex while sleeping so others progress */
			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}
  1211. /**
  1212. * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
  1213. * @parent: parent of the target
  1214. * @name: name of the kernfs_node to remove
  1215. * @ns: namespace tag of the kernfs_node to remove
  1216. *
  1217. * Look for the kernfs_node with @name and @ns under @parent and remove it.
  1218. * Returns 0 on success, -ENOENT if such entry doesn't exist.
  1219. */
  1220. int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
  1221. const void *ns)
  1222. {
  1223. struct kernfs_node *kn;
  1224. if (!parent) {
  1225. WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
  1226. name);
  1227. return -ENOENT;
  1228. }
  1229. mutex_lock(&kernfs_mutex);
  1230. kn = kernfs_find_ns(parent, name, ns);
  1231. if (kn)
  1232. __kernfs_remove(kn);
  1233. mutex_unlock(&kernfs_mutex);
  1234. if (kn)
  1235. return 0;
  1236. else
  1237. return -ENOENT;
  1238. }
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success, -errno on failure.
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	/* both ends must be live and the target must accept children */
	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		/* name unchanged; NULL tells the code below to keep it */
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	/* hash depends on name and ns: recompute, then re-link the node */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	/* release the old parent's ref and the old name, if replaced */
	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	mutex_unlock(&kernfs_mutex);
	return error;
}
  1300. /* Relationship between s_mode and the DT_xxx types */
  1301. static inline unsigned char dt_type(struct kernfs_node *kn)
  1302. {
  1303. return (kn->mode >> 12) & 15;
  1304. }
/* ->release for directory files: drop the saved readdir position, if any */
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	/* private_data holds the kernfs_node readdir cursor (may be NULL) */
	kernfs_put(filp->private_data);
	return 0;
}
/*
 * Find the node to resume directory iteration from.
 *
 * @pos is the node the previous readdir call stopped at (may be NULL) and
 * @hash is the saved ctx->pos cursor.  If @pos is still an active child of
 * @parent with a matching hash, resume from it; otherwise re-locate the
 * position by hash in @parent's rbtree.  Inactive nodes and nodes from a
 * different namespace are skipped.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		/* drop the ref the previous readdir iteration took */
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	/* cursor values 0/1 are the dot entries; INT_MAX marks EOF */
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}
  1342. static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
  1343. struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
  1344. {
  1345. pos = kernfs_dir_pos(ns, parent, ino, pos);
  1346. if (pos) {
  1347. do {
  1348. struct rb_node *node = rb_next(&pos->rb);
  1349. if (!node)
  1350. pos = NULL;
  1351. else
  1352. pos = rb_to_kn(node);
  1353. } while (pos && (!kernfs_active(pos) || pos->ns != ns));
  1354. }
  1355. return pos;
  1356. }
/*
 * ->iterate for kernfs directories.  The hash of each node doubles as the
 * seek cursor (ctx->pos); the current node is cached in file->private_data
 * with a reference held so iteration can resume across calls.
 */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = dentry->d_fsdata;
	struct kernfs_node *pos = file->private_data;
	const void *ns = NULL;

	/* emit "." and ".." first; cursor values 0/1 are reserved for them */
	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = pos->ino;

		/* remember where we are before dropping the mutex */
		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		/*
		 * Drop the mutex across dir_emit(), which copies to
		 * userspace and can block; the ref taken above keeps
		 * @pos alive meanwhile.
		 */
		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	/* INT_MAX marks end-of-directory for subsequent calls */
	ctx->pos = INT_MAX;
	return 0;
}
  1388. static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
  1389. int whence)
  1390. {
  1391. struct inode *inode = file_inode(file);
  1392. loff_t ret;
  1393. inode_lock(inode);
  1394. ret = generic_file_llseek(file, offset, whence);
  1395. inode_unlock(inode);
  1396. return ret;
  1397. }
/* file operations shared by all kernfs directories */
const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= kernfs_dir_fop_llseek,
};