#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
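
/*
 * A note on the d_op choice in ceph_init_dentry() above: dentries
 * whose parent is in the live (NOSNAP) namespace get the full ops
 * (revalidate, release, prune); children of the fake .snap dir get
 * ops whose d_revalidate always succeeds; dentries inside a snapshot
 * get release/prune only, since snapshot contents never change.
 */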

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = d_inode(dentry->d_parent);
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
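
/*
 * Worked example of the encoding implied by the helpers above: the
 * frag occupies the high 32 bits of f_pos and the offset within that
 * frag the low 32 bits, so frag 0x1000 at offset 5 encodes as
 * ((loff_t)0x1000 << 32) | 5.
 */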

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && d_really_is_positive(dentry) &&
		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
		     dentry, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !d_inode(dentry) ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete_ordered(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
	     dentry, dentry, d_inode(dentry));
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, d_inode(dentry)->i_ino),
		      d_inode(dentry)->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
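
/*
 * A note on the traversal direction above: new dentries are linked at
 * the head of the parent's d_subdirs list, so the entries received
 * earliest from the MDS sit nearest the tail.  Walking via ->prev from
 * the tail therefore visits entries in the order the MDS returned
 * them; the "complete and ordered" flag guards against cases where
 * that ordering has since been perturbed.
 */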

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */
	if (ctx->pos == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);
		fi->dir_ordered_count = ci->i_ordered_count;
	}

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		if (ci->i_ordered_count == fi->dir_ordered_count)
			dout(" marking %p complete and ordered\n", inode);
		else
			dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}
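
/*
 * Offsets 0 and 1 within the leftmost frag are synthesized for "."
 * and "..", which is why buffered results restart at offset 2 there;
 * every other frag starts at offset 0.
 */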

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
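		/* fall through */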
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
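
/*
 * Note that the strncmp() above matches on the first five characters,
 * so any name in the root directory beginning with ".ceph" bypasses
 * the cached-ENOENT shortcut in ceph_lookup() below.
 */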

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to old MDS.  Recent MDS does not send traceless
		 * reply for request that creates new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
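
/*
 * For example, with i_nlink == 1 and no caps currently wanted, the
 * resulting mask asks the MDS to drop everything except CEPH_CAP_PIN,
 * since the inode is about to go away anyway.
 */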

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
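
/*
 * Setting d_time to the current jiffies defeats the time_before()
 * check in dentry_lease_is_valid(), and clearing lease_shared_gen
 * detaches the dentry from the directory's shared cap generation.
 */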

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = d_inode(dentry->d_parent);
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (d_really_is_positive(dentry))
			valid = ceph_is_any_caps(d_inode(dentry));
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			unsigned long time_left = wait_for_completion_timeout(
							&req->r_safe_completion,
							req->r_timeout);
			if (time_left > 0)
				ret = 0;
			else
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);
	return ret;
}
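
/*
 * Note that the newest unsafe request (largest tid) is waited on
 * first; the loop then sweeps whatever older requests remain on the
 * list, so operations submitted after the fsync began are not waited
 * for.
 */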

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};