dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/xattr.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
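
/*
 * Illustrative example (editor's note, not part of the original
 * source): a lookup of "foo" in a directory whose inode number is,
 * say, 0x10000000123 is expressed to the MDS as roughly (base ino
 * 0x10000000123, path "foo"), while a getattr triggered by fstat(2)
 * carries only the base ino and no path component at all.  The actual
 * wire encoding is built in mds_client.c, not here.
 */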

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
static int ceph_d_init(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;	/* oh well */

	di->dentry = dentry;
	di->lease_session = NULL;
	di->time = jiffies;
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
	return 0;
}

/*
 * for f_pos for readdir:
 * - hash order:
 *	(0xff << 52) | ((24 bits hash) << 28) |
 *	(the nth entry has hash collision);
 * - frag+name order;
 *	((frag value) << 28) | (the nth entry in frag);
 */
#define OFFSET_BITS	28
#define OFFSET_MASK	((1 << OFFSET_BITS) - 1)
#define HASH_ORDER	(0xffull << (OFFSET_BITS + 24))
loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
{
	loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
	if (hash_order)
		fpos |= HASH_ORDER;
	return fpos;
}

static bool is_hash_order(loff_t p)
{
	return (p & HASH_ORDER) == HASH_ORDER;
}

static unsigned fpos_frag(loff_t p)
{
	return p >> OFFSET_BITS;
}

static unsigned fpos_hash(loff_t p)
{
	return ceph_frag_value(fpos_frag(p));
}

static unsigned fpos_off(loff_t p)
{
	return p & OFFSET_MASK;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
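
/*
 * Worked example (illustrative only): in frag+name order,
 *
 *   ceph_make_fpos(0x2a, 5, false) == ((loff_t)0x2a << 28) | 5
 *                                  == 0x2a0000005
 *
 * so fpos_frag() recovers 0x2a and fpos_off() recovers 5.  With
 * hash_order=true the same call also sets the 0xff marker above bit
 * 51, so is_hash_order() returns true and the bits above OFFSET_BITS
 * are treated as a 24-bit name hash rather than a frag value.
 */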

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
			struct ceph_readdir_cache_control *cache_ctl)
{
	struct inode *dir = d_inode(parent);
	struct dentry *dentry;
	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
	loff_t ptr_pos = idx * sizeof(struct dentry *);
	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

	if (ptr_pos >= i_size_read(dir))
		return NULL;

	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
		ceph_readdir_cache_release(cache_ctl);
		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
		if (!cache_ctl->page) {
			dout(" page %lu not found\n", ptr_pgoff);
			return ERR_PTR(-EAGAIN);
		}
		/* reading/filling the cache are serialized by
		   i_mutex, no need to use page lock */
		unlock_page(cache_ctl->page);
		cache_ctl->dentries = kmap(cache_ctl->page);
	}

	cache_ctl->index = idx & idx_mask;

	rcu_read_lock();
	spin_lock(&parent->d_lock);
	/* check i_size again here, because empty directory can be
	 * marked as complete while not holding the i_mutex. */
	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
		dentry = cache_ctl->dentries[cache_ctl->index];
	else
		dentry = NULL;
	spin_unlock(&parent->d_lock);
	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
		dentry = NULL;
	rcu_read_unlock();
	return dentry ? : ERR_PTR(-EAGAIN);
}
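
/*
 * Sizing note (illustrative, assuming 4KB pages and 8-byte pointers):
 * each page of the directory's page cache holds
 * PAGE_SIZE / sizeof(struct dentry *) == 512 cached dentry pointers,
 * so cache index idx lives in page idx / 512 at slot idx % 512; that
 * is exactly what the ptr_pgoff and idx_mask arithmetic above computes.
 */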

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	struct ceph_readdir_cache_control cache_ctl = {};
	u64 idx = 0;
	int err = 0;

	dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);

	/* search start position */
	if (ctx->pos > 2) {
		u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
		while (count > 0) {
			u64 step = count >> 1;
			dentry = __dcache_find_get_entry(parent, idx + step,
							 &cache_ctl);
			if (!dentry) {
				/* use linear search */
				idx = 0;
				break;
			}
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out;
			}
			di = ceph_dentry(dentry);
			spin_lock(&dentry->d_lock);
			if (fpos_cmp(di->offset, ctx->pos) < 0) {
				idx += step + 1;
				count -= step + 1;
			} else {
				count = step;
			}
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}

		dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
	}

	for (;;) {
		bool emit_dentry = false;
		dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
		if (!dentry) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out;
		}

		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llx dentry %p %pd %p\n", di->offset,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}
	}
out:
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
	}
	return err;
}

static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
{
	if (!fi->last_readdir)
		return true;
	if (is_hash_order(pos))
		return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
	else
		return fi->frag != fpos_frag(pos);
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int i;
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (need_send_readdir(fi, ctx->pos)) {
		struct ceph_mds_request *req;
		unsigned frag;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		if (is_hash_order(ctx->pos)) {
			frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
						NULL, NULL);
		} else {
			frag = fpos_frag(ctx->pos);
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.flags =
				cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d on "
		     "frag %x, end=%d, complete=%d, hash_order=%d\n",
		     err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete,
		     (int)req->r_reply_info.hash_order);

		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (!rinfo->hash_order) {
				fi->next_offset = req->r_readdir_offset;
				/* adjust ctx->pos to beginning of frag */
				ctx->pos = ceph_make_fpos(frag,
							  fi->next_offset,
							  false);
			}
		}

		fi->frag = frag;
		fi->last_readdir = req;

		if (req->r_did_prepopulate) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) &&
				   fi->next_offset == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		/* note next offset and last dentry name */
		if (rinfo->dir_nr > 0) {
			struct ceph_mds_reply_dir_entry *rde =
					rinfo->dir_entries + (rinfo->dir_nr-1);
			unsigned next_offset = req->r_reply_info.dir_end ?
					2 : (fpos_off(rde->offset) + 1);
			err = note_last_dentry(fi, rde->name, rde->name_len,
					       next_offset);
			if (err)
				return err;
		} else if (req->r_reply_info.dir_end) {
			fi->next_offset = 2;
			/* keep last name */
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d pos %llx chunk first %llx\n",
	     fi->frag, rinfo->dir_nr, ctx->pos,
	     rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);

	i = 0;
	/* search start position */
	if (rinfo->dir_nr > 0) {
		int step, nr = rinfo->dir_nr;
		while (nr > 0) {
			step = nr >> 1;
			if (rinfo->dir_entries[i + step].offset < ctx->pos) {
				i += step + 1;
				nr -= step + 1;
			} else {
				nr = step;
			}
		}
	}
	for (; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		ino_t ino;

		BUG_ON(rde->offset < ctx->pos);

		ctx->pos = rde->offset;
		dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
		     i, rinfo->dir_nr, ctx->pos,
		     rde->name_len, rde->name, &rde->inode.in);

		BUG_ON(!rde->inode.in);
		ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);
		ino = ceph_vino_to_ino(vino);

		if (!dir_emit(ctx, rde->name, rde->name_len,
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		ctx->pos++;
	}

	if (fi->next_offset > 2) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(fi->frag)) {
		unsigned frag = ceph_frag_next(fi->frag);
		if (is_hash_order(ctx->pos)) {
			loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
							fi->next_offset, true);
			if (new_pos > ctx->pos)
				ctx->pos = new_pos;
			/* keep last_name */
		} else {
			ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
			kfree(fi->last_name);
			fi->last_name = NULL;
		}
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	fi->next_offset = 2;  /* compensate for . and .. */
	fi->flags &= ~CEPH_F_ATEND;
}

/*
 * discard buffered readdir content on seekdir(0), or seek to new frag,
 * or seek prior to current chunk
 */
static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
{
	struct ceph_mds_reply_info_parsed *rinfo;
	loff_t chunk_offset;
	if (new_pos == 0)
		return true;
	if (is_hash_order(new_pos)) {
		/* no need to reset last_name for a forward seek when
		 * dentries are sorted in hash order */
	} else if (fi->frag != fpos_frag(new_pos)) {
		return true;
	}
	rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
	if (!rinfo || !rinfo->dir_nr)
		return true;
	chunk_offset = rinfo->dir_entries[0].offset;
	return new_pos < chunk_offset ||
	       is_hash_order(new_pos) != is_hash_order(chunk_offset);
}
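
/*
 * Examples (illustrative): a seekdir back to offset 0 always discards
 * the buffered chunk; so does a seek into a different frag while
 * entries are in frag+name order, or a seek to before the first entry
 * of the buffered chunk.  A forward seek within the current chunk
 * keeps the buffered readdir result (whether a new MDS request is
 * still needed is decided later by need_send_readdir()).
 */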

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t retval;

	inode_lock(inode);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (need_reset_readdir(fi, offset)) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		} else if (is_hash_order(offset) && offset > file->f_pos) {
			/* for hash offset, we don't know if a forward seek
			 * is within same frag */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}

		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int mask;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.getattr.mask = cpu_to_le32(mask);

	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to old MDS.  Recent MDS does not send traceless
		 * reply for request that creates new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
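
/*
 * Illustrative example (not from the original source): for a file with
 * i_nlink == 1 that this client still has open for read, the wanted
 * mask might be roughly CEPH_CAP_FILE_RD, so the mask returned here is
 * LINK_SHARED | LINK_EXCL plus every cap other than the wanted bits
 * and CEPH_CAP_PIN; those caps are offered back to the MDS as part of
 * the unlink request rather than waiting for a separate revoke.
 */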

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (flags)
		return -EINVAL;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	ceph_dentry(dentry)->time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
				 struct inode *dir)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, di->time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/*
				 * We should renew. If we're in RCU walk mode
				 * though, we can't do that so just return
				 * -ECHILD.
				 */
				if (flags & LOOKUP_RCU) {
					valid = -ECHILD;
				} else {
					session = ceph_get_mds_session(s);
					seq = di->lease_seq;
					di->lease_renew_after = 0;
					di->lease_renew_from = jiffies;
				}
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct dentry *parent;
	struct inode *dir;

	if (flags & LOOKUP_RCU) {
		parent = ACCESS_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
	} else {
		parent = dget_parent(dentry);
		dir = d_inode(parent);
	}

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else {
		valid = dentry_lease_is_valid(dentry, flags, dir);
		if (valid == -ECHILD)
			return valid;
		if (valid || dir_lease_is_valid(dir, dentry)) {
			if (d_really_is_positive(dentry))
				valid = ceph_is_any_caps(d_inode(dentry));
			else
				valid = 1;
		}
	}

	if (!valid) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(dir->i_sb)->mdsc;
		struct ceph_mds_request *req;
		int op, err;
		u32 mask;

		if (flags & LOOKUP_RCU)
			return -ECHILD;

		op = ceph_snap(dir) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_GETATTR;
		req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
		if (!IS_ERR(req)) {
			req->r_dentry = dget(dentry);
			req->r_num_caps = op == CEPH_MDS_OP_GETATTR ? 1 : 2;

			mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
			if (ceph_security_xattr_wanted(dir))
				mask |= CEPH_CAP_XATTR_SHARED;
			req->r_args.getattr.mask = cpu_to_le32(mask);

			err = ceph_mdsc_do_request(mdsc, NULL, req);
			switch (err) {
			case 0:
				if (d_really_is_positive(dentry) &&
				    d_inode(dentry) == req->r_target_inode)
					valid = 1;
				break;
			case -ENOENT:
				if (d_really_is_negative(dentry))
					valid = 1;
				/* Fallthrough */
			default:
				break;
			}
			ceph_mdsc_put_request(req);
			dout("d_revalidate %p lookup result=%d\n",
			     dentry, err);
		}
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}

	if (!(flags & LOOKUP_RCU))
		dput(parent);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);

	spin_lock(&dentry->d_lock);
	dentry->d_fsdata = NULL;
	spin_unlock(&dentry->d_lock);

	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;
	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
	.d_init = ceph_d_init,
};