#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_{read,write}_iter helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
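
/*
 * In rough terms, ceph_read_iter()/ceph_write_iter() below pick a
 * path as follows:
 *
 *   O_DIRECT or CEPH_F_SYNC set         -> sync (or direct) path
 *   no Fc/Fb (CACHE/BUFFER) caps held   -> sync path
 *   otherwise                           -> buffered path
 */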

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* First file open request creates the cookie, we want to keep
		 * this cookie around for the lifetime of the inode so as not
		 * to have to worry about fscache register / revoke / operation
		 * races.
		 *
		 * Also, if we know the operation is going to invalidate data
		 * (non readonly) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
		/* fall through: regular files and dirs share the setup below */
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = NULL;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	if (flags & O_CREAT)
		parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	int err;

	dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;	/* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err)
		goto out_err;

	err = ceph_handle_snapdir(req, dentry, err);
	if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_err;
	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_acl(dentry, dentry->d_inode, dir);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_err:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	int io_align, page_align;
	int pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret >= 0) {
		int didpages;
		if (was_short && (pos + ret < inode->i_size)) {
			u64 tmp = min(this_len - ret,
				      inode->i_size - pos - ret);
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + tmp);
			ceph_zero_page_vector_range(page_align + read + ret,
						    tmp, pages);
			ret += tmp;
		}

		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit a stripe boundary and need to continue? */
		if (left && hit_stripe && pos < inode->i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = 1;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
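
/*
 * A worked example (assuming the default file layout: 4 MB objects,
 * stripe_count 1): a 6 MB read at offset 0 is trimmed by the osd
 * client to the first object, so this_len comes back as 4 MB and
 * hit_stripe is set.  If the full 4 MB arrived and i_size allows,
 * we loop via "more" to read the remaining 2 MB from the next object.
 */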

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = iov_iter_count(i);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (file->f_flags & O_DIRECT) {
		while (iov_iter_count(i)) {
			size_t start;
			ssize_t n;

			n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start);
			if (n < 0)
				return n;

			num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;

			ret = striped_read(inode, off, n,
					   pages, num_pages, checkeof,
					   true, start);

			ceph_put_page_vector(pages, num_pages, true);

			if (ret <= 0)
				break;
			off += ret;
			iov_iter_advance(i, ret);
			if (ret < n)
				break;
		}
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		ret = striped_read(inode, off, len, pages,
				   num_pages, checkeof, false, 0);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t,
						    PAGE_SIZE - page_off, left);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, i);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
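
/*
 * The resulting lifecycle for one request is roughly:
 *
 *   request sent     -> unsafe(true):  take an Fw cap ref, put the
 *                       request on ci->i_unsafe_writes
 *   ONDISK reply (or -> unsafe(false): unlink the request, drop the
 *   early completion)   Fw cap ref
 *
 * so the client can find writes the OSD has acked but not yet
 * committed to disk.
 */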

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_write on file %p %lld~%u\n", file, pos,
	     (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;

	while (iov_iter_count(from) > 0) {
		u64 len = iov_iter_single_seg_count(from);
		size_t start;
		ssize_t n;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len,
					    2, /* include a 'startsync' command */
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		n = iov_iter_get_pages_alloc(from, &pages, len, &start);
		if (unlikely(n < 0)) {
			ret = n;
			ceph_osdc_put_request(req);
			break;
		}

		num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos + n) | (PAGE_CACHE_SIZE - 1));
		osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
						 false, false);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_put_page_vector(pages, num_pages, false);

		ceph_osdc_put_request(req);
		if (ret)
			break;
		pos += n;
		written += n;
		iov_iter_advance(from, n);

		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		iocb->ki_pos = pos;
		ret = written;
	}
	return ret;
}

/*
 * Synchronous write, copying from the iterator into a temporary page
 * vector rather than referencing user pages directly.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		snapc = ci->i_snap_realm->cached_context;
		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		/* note: this label is inside the loop; clean up this
		 * iteration's request, then continue or bail */
		ceph_osdc_put_request(req);
		if (ret == 0) {
			pos += len;
			written += len;

			if (pos > i_size_read(inode)) {
				check_caps = ceph_inode_set_size(inode, pos);
				if (check_caps)
					ceph_check_caps(ceph_inode(inode),
							CHECK_CAPS_AUTHONLY,
							NULL);
			}
		} else
			break;
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iocb->ki_nbytes;
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {
		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(iocb, to, &checkeof);
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_read_iter(iocb, to);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode,
					      CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && iocb->ki_pos < inode->i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos,
			     inode->i_size);

			iov_iter_advance(to, ret);
			read += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	ssize_t count = iov_iter_count(from), written = 0;
	int err, want, got;
	loff_t pos = iocb->ki_pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = file->f_mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;
	iov_iter_truncate(from, count);

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct iov_iter data;
		mutex_unlock(&inode->i_mutex);
		/* we might need to revert back to that point */
		data = *from;
		if (file->f_flags & O_DIRECT)
			written = ceph_sync_direct_write(iocb, &data, pos);
		else
			written = ceph_sync_write(iocb, &data, pos);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u "
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			mutex_lock(&inode->i_mutex);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
	} else {
		loff_t old_size = inode->i_size;
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we cannot hold the Fwb cap while a
		 * vmtruncate is pending, so write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (inode->i_size > old_size)
			ceph_fscache_update_objectsize(inode);
		mutex_unlock(&inode->i_mutex);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	mutex_unlock(&inode->i_mutex);
out_unlocked:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
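
/*
 * Note on the SEEK_DATA/SEEK_HOLE cases in ceph_llseek() below: no
 * data/hole map is consulted, so a SEEK_DATA offset inside the file
 * is returned unchanged and SEEK_HOLE returns i_size; in other words,
 * the whole file is treated as one solid data extent.
 */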

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (whence) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it.
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
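
/*
 * For example, with 4 KB pages, zeroing offset 1000 for length 10000
 * zeroes bytes 1000-4095 in the first page, truncates the single
 * fully covered page (4096-8191) out of the page cache, and zeroes
 * bytes 8192-10999 in the last page.
 */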

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
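
/*
 * For example, a layout with 4 MB objects and stripe_count 3 has a
 * 12 MB period (object_set_size).  Punching a hole from 5 MB to 29 MB
 * zeroes partial objects up to the period boundary at 12 MB, deletes
 * or truncates the three whole objects of the 12-24 MB period, then
 * zeroes the remainder up to 29 MB.
 */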

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = new_sync_read,
	.write = new_sync_write,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};