dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO);
	if (!di)
		return -ENOMEM;		/* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

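/*
 * The unlocked d_fsdata check at the top of ceph_init_dentry() relies
 * on the smp_mb() above: di is fully initialized before d_fsdata is
 * published, so a caller that races with initialization and observes
 * a non-NULL d_fsdata can safely treat the dentry as initialized and
 * return without taking d_lock.
 */
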
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = d_inode(dentry->d_parent);
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}

static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}

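/*
 * A worked example of this packing, assuming ceph_make_fpos() (used
 * in ceph_readdir() below) is the inverse of the two helpers above,
 * i.e. ((loff_t)frag << 32) | off: for frag f at offset 5,
 *
 *	pos = ((loff_t)f << 32) | 5
 *	fpos_frag(pos) = f
 *	fpos_off(pos)  = 5
 *
 * so fpos_cmp() orders positions first by frag (in frag-tree order,
 * via ceph_frag_compare()) and then by offset within the frag.
 */
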
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
	int err = 0;
	loff_t ptr_pos = 0;
	struct ceph_readdir_cache_control cache_ctl = {};

	dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);

	/* we can calculate cache index for the first dirfrag */
	if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
		cache_ctl.index = fpos_off(ctx->pos) - 2;
		BUG_ON(cache_ctl.index < 0);
		ptr_pos = cache_ctl.index * sizeof(struct dentry *);
	}

	while (true) {
		pgoff_t pgoff;
		bool emit_dentry;

		if (ptr_pos >= i_size_read(dir)) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}

		err = -EAGAIN;
		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
			ceph_readdir_cache_release(&cache_ctl);
			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
			if (!cache_ctl.page) {
				dout(" page %lu not found\n", pgoff);
				break;
			}
			/* reading/filling the cache are serialized by
			 * i_mutex, no need to use page lock */
			unlock_page(cache_ctl.page);
			cache_ctl.dentries = kmap(cache_ctl.page);
		}

		rcu_read_lock();
		spin_lock(&parent->d_lock);
		/* check i_size again here, because empty directory can be
		 * marked as complete while not holding the i_mutex. */
		if (ceph_dir_is_complete_ordered(dir) &&
		    ptr_pos < i_size_read(dir))
			dentry = cache_ctl.dentries[cache_ctl.index % nsize];
		else
			dentry = NULL;
		spin_unlock(&parent->d_lock);
		if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
			dentry = NULL;
		rcu_read_unlock();
		if (!dentry)
			break;

		emit_dentry = false;
		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}

		cache_ctl.index++;
		ptr_pos += sizeof(struct dentry *);
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
	}
	return err;
}

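/*
 * The cache being walked above is an array of dentry pointers kept in
 * the directory inode's page cache, with i_size tracking how many
 * slots are valid (see the i_size_write() call in ceph_readdir()
 * below).  With 4 KB pages and 8-byte pointers, for example, each
 * page holds nsize = 512 slots, so cache index i lives in page
 * i / 512 at slot i % 512; that is exactly what the pgoff computation
 * and the cache_ctl.dentries[cache_ctl.index % nsize] lookup
 * implement.
 */
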
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			off = req->r_readdir_offset;
			fi->next_offset = off;
		}

		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_did_prepopulate) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) && off == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate\n");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1],
				       fi->next_offset + rinfo->dir_nr);
			if (err)
				return err;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	/* last_name is still set, so this frag has more entries;
	 * fetch the next chunk */
	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;	/* compensate for . and .. */
	else
		fi->next_offset = 0;
	fi->flags &= ~CEPH_F_ATEND;
}

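/*
 * To recap the offset convention used here and in ceph_readdir():
 * offsets 0 and 1 of the leftmost frag are reserved for the
 * synthesized "." and ".." entries, so real entries in that frag
 * start at offset 2, while every other frag starts at offset 0.  A
 * full pass over a directory split into frags A and B thus visits
 * f_pos values roughly as
 *
 *	ceph_make_fpos(A, 0..1)  ->  "." and ".."  (A leftmost)
 *	ceph_make_fpos(A, 2..)   ->  entries of A
 *	ceph_make_fpos(B, 0..)   ->  entries of B
 */
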
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
		/* fall through */
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			/* discard buffered readdir content on seekdir(0), or
			 * seek to new frag, or seek prior to current chunk */
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		} else if (fpos_cmp(offset, old_offset) > 0) {
			/* reset dir_release_count if we did a forward seek */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

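/*
 * Note that the strncmp() above compares only the first five bytes,
 * so this is a prefix match: any root-level name beginning with
 * ".ceph" (say, a hypothetical ".ceph-foo") is treated as the special
 * root ceph dentry as well.
 */
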
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);	/* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to old MDS.  Recent MDS does not send traceless
		 * reply for request that creates new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

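/*
 * Spelling out the mask arithmetic above: we always ask the MDS to
 * drop CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL.  When the unlink
 * will take i_nlink to zero, ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN)
 * additionally selects every cap bit we neither still want (the file
 * may still be open) nor need to keep pinned, so those caps are
 * released in the same request rather than by later cap messages.
 */
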
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		 * rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

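/*
 * Note how this pairs with the validity checks below: setting d_time
 * to the current jiffies makes the time_before(jiffies, dentry->d_time)
 * test in dentry_lease_is_valid() fail from now on, and a
 * lease_shared_gen of zero should no longer match the parent
 * directory's current i_shared_gen in dir_lease_is_valid(), so
 * neither the per-dentry lease nor the dir-wide cap will revalidate
 * this dentry.
 */
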
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = d_inode(dentry->d_parent);
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (d_really_is_positive(dentry))
			valid = ceph_is_any_caps(d_inode(dentry));
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

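/*
 * On the -ECHILD above: under LOOKUP_RCU the VFS calls d_revalidate
 * in RCU-walk mode, where we may not block or take references, while
 * this implementation needs to grab the parent inode and several
 * locks.  Returning -ECHILD is the standard way to ask the VFS to
 * drop out of RCU-walk and retry the lookup in ref-walk mode.
 */
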
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

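/*
 * A usage sketch (assuming a client mounted with the dirstat option,
 * per the comment above), where reading the directory should yield
 * the stats table built by the snprintf() above:
 *
 *	$ mount -t ceph mon:/ /mnt -o dirstat
 *	$ cat /mnt/some/dir
 *	entries: ...
 *	 files: ...
 *	 subdirs: ...
 *	...
 *
 * Without dirstat, the read fails with EISDIR, the usual result of
 * read(2) on a directory.
 */
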
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;
	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};