file.c

  1. #include <linux/ceph/ceph_debug.h>
  2. #include <linux/module.h>
  3. #include <linux/sched.h>
  4. #include <linux/slab.h>
  5. #include <linux/file.h>
  6. #include <linux/mount.h>
  7. #include <linux/namei.h>
  8. #include <linux/writeback.h>
  9. #include <linux/falloc.h>
  10. #include "super.h"
  11. #include "mds_client.h"
  12. #include "cache.h"
  13. /*
  14. * Ceph file operations
  15. *
  16. * Implement basic open/close functionality, and implement
  17. * read/write.
  18. *
  19. * We implement three modes of file I/O:
  20. * - buffered uses the generic VFS helpers (generic_file_read_iter / generic_perform_write)
  21. *
  22. * - synchronous is used when there is multi-client read/write
  23. * sharing, avoids the page cache, and synchronously waits for an
  24. * ack from the OSD.
  25. *
  26. * - direct io takes the variant of the sync path that references
  27. * user pages directly.
  28. *
  29. * fsync() flushes and waits on dirty pages, but just queues metadata
  30. * for writeback: since the MDS can recover size and mtime there is no
  31. * need to wait for MDS acknowledgement.
  32. */
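/*
 * A rough sketch of how a path is chosen (illustrative, not a strict
 * rule): the flags passed to open(2) and the caps granted by the MDS
 * together decide it.
 *
 *	fd = open("/mnt/ceph/f", O_RDWR);            -> buffered, when the
 *	                                                client holds the Fc/Fb
 *	                                                (cache/buffer) caps
 *	fd = open("/mnt/ceph/f", O_RDWR | O_DIRECT); -> direct i/o variant
 *
 * The synchronous path is used when those caps are not available, or when
 * CEPH_F_SYNC is set on the file (typically via the SYNCIO ioctl); see
 * ceph_read_iter() and ceph_write_iter() below.
 */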
  33. /*
  34. * Prepare an open request. Preallocate ceph_cap to avoid an
  35. * inopportune ENOMEM later.
  36. */
  37. static struct ceph_mds_request *
  38. prepare_open_request(struct super_block *sb, int flags, int create_mode)
  39. {
  40. struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
  41. struct ceph_mds_client *mdsc = fsc->mdsc;
  42. struct ceph_mds_request *req;
  43. int want_auth = USE_ANY_MDS;
  44. int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
  45. if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
  46. want_auth = USE_AUTH_MDS;
  47. req = ceph_mdsc_create_request(mdsc, op, want_auth);
  48. if (IS_ERR(req))
  49. goto out;
  50. req->r_fmode = ceph_flags_to_mode(flags);
  51. req->r_args.open.flags = cpu_to_le32(flags);
  52. req->r_args.open.mode = cpu_to_le32(create_mode);
  53. out:
  54. return req;
  55. }
  56. /*
  57. * initialize private struct file data.
  58. * if we fail, clean up by dropping fmode reference on the ceph_inode
  59. */
  60. static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
  61. {
  62. struct ceph_file_info *cf;
  63. int ret = 0;
  64. struct ceph_inode_info *ci = ceph_inode(inode);
  65. struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
  66. struct ceph_mds_client *mdsc = fsc->mdsc;
  67. switch (inode->i_mode & S_IFMT) {
  68. case S_IFREG:
  69. /* The first file open request creates the fscache cookie; we keep
  70. * this cookie around for the lifetime of the inode so as not to
  71. * have to worry about fscache register / revoke / operation
  72. * races.
  73. *
  74. * Also, if we know the operation is going to invalidate data
  75. * (non readonly) just nuke the cache right away.
  76. */
  77. ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
  78. if ((fmode & CEPH_FILE_MODE_WR))
  79. ceph_fscache_invalidate(inode);
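/* fall through: regular files share the setup below with directories */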
  80. case S_IFDIR:
  81. dout("init_file %p %p 0%o (regular)\n", inode, file,
  82. inode->i_mode);
  83. cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
  84. if (cf == NULL) {
  85. ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
  86. return -ENOMEM;
  87. }
  88. cf->fmode = fmode;
  89. cf->next_offset = 2;
  90. file->private_data = cf;
  91. BUG_ON(inode->i_fop->release != ceph_release);
  92. break;
  93. case S_IFLNK:
  94. dout("init_file %p %p 0%o (symlink)\n", inode, file,
  95. inode->i_mode);
  96. ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
  97. break;
  98. default:
  99. dout("init_file %p %p 0%o (special)\n", inode, file,
  100. inode->i_mode);
  101. /*
  102. * we need to drop the open ref now, since we don't
  103. * have .release set to ceph_release.
  104. */
  105. ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
  106. BUG_ON(inode->i_fop->release == ceph_release);
  107. /* call the proper open fop */
  108. ret = inode->i_fop->open(inode, file);
  109. }
  110. return ret;
  111. }
  112. /*
  113. * If we already have the requisite capabilities, we can satisfy
  114. * the open request locally (no need to request new caps from the
  115. * MDS). We do, however, need to inform the MDS (asynchronously)
  116. * if our wanted caps set expands.
  117. */
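/*
 * For example, a read-only open of an inode on which this client already
 * holds caps completes without a synchronous MDS round trip; if the
 * wanted set grew, ceph_check_caps() pushes the update to the MDS
 * asynchronously.
 */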
  118. int ceph_open(struct inode *inode, struct file *file)
  119. {
  120. struct ceph_inode_info *ci = ceph_inode(inode);
  121. struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
  122. struct ceph_mds_client *mdsc = fsc->mdsc;
  123. struct ceph_mds_request *req;
  124. struct ceph_file_info *cf = file->private_data;
  125. struct inode *parent_inode = NULL;
  126. int err;
  127. int flags, fmode, wanted;
  128. if (cf) {
  129. dout("open file %p is already opened\n", file);
  130. return 0;
  131. }
  132. /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
  133. flags = file->f_flags & ~(O_CREAT|O_EXCL);
  134. if (S_ISDIR(inode->i_mode))
  135. flags = O_DIRECTORY; /* mds likes to know */
  136. dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
  137. ceph_vinop(inode), file, flags, file->f_flags);
  138. fmode = ceph_flags_to_mode(flags);
  139. wanted = ceph_caps_for_mode(fmode);
  140. /* snapped files are read-only */
  141. if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
  142. return -EROFS;
  143. /* trivially open snapdir */
  144. if (ceph_snap(inode) == CEPH_SNAPDIR) {
  145. spin_lock(&ci->i_ceph_lock);
  146. __ceph_get_fmode(ci, fmode);
  147. spin_unlock(&ci->i_ceph_lock);
  148. return ceph_init_file(inode, file, fmode);
  149. }
  150. /*
  151. * No need to block if we have caps on the auth MDS (for
  152. * write) or any MDS (for read). Update wanted set
  153. * asynchronously.
  154. */
  155. spin_lock(&ci->i_ceph_lock);
  156. if (__ceph_is_any_real_caps(ci) &&
  157. (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
  158. int mds_wanted = __ceph_caps_mds_wanted(ci);
  159. int issued = __ceph_caps_issued(ci, NULL);
  160. dout("open %p fmode %d want %s issued %s using existing\n",
  161. inode, fmode, ceph_cap_string(wanted),
  162. ceph_cap_string(issued));
  163. __ceph_get_fmode(ci, fmode);
  164. spin_unlock(&ci->i_ceph_lock);
  165. /* adjust wanted? */
  166. if ((issued & wanted) != wanted &&
  167. (mds_wanted & wanted) != wanted &&
  168. ceph_snap(inode) != CEPH_SNAPDIR)
  169. ceph_check_caps(ci, 0, NULL);
  170. return ceph_init_file(inode, file, fmode);
  171. } else if (ceph_snap(inode) != CEPH_NOSNAP &&
  172. (ci->i_snap_caps & wanted) == wanted) {
  173. __ceph_get_fmode(ci, fmode);
  174. spin_unlock(&ci->i_ceph_lock);
  175. return ceph_init_file(inode, file, fmode);
  176. }
  177. spin_unlock(&ci->i_ceph_lock);
  178. dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
  179. req = prepare_open_request(inode->i_sb, flags, 0);
  180. if (IS_ERR(req)) {
  181. err = PTR_ERR(req);
  182. goto out;
  183. }
  184. req->r_inode = inode;
  185. ihold(inode);
  186. req->r_num_caps = 1;
  187. if (flags & O_CREAT)
  188. parent_inode = ceph_get_dentry_parent_inode(file->f_path.dentry);
  189. err = ceph_mdsc_do_request(mdsc, parent_inode, req);
  190. iput(parent_inode);
  191. if (!err)
  192. err = ceph_init_file(inode, file, req->r_fmode);
  193. ceph_mdsc_put_request(req);
  194. dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
  195. out:
  196. return err;
  197. }
  198. /*
  199. * Do a lookup + open with a single request. If we get a non-existent
  200. * file or symlink, return 1 so the VFS can retry.
  201. */
  202. int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
  203. struct file *file, unsigned flags, umode_t mode,
  204. int *opened)
  205. {
  206. struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
  207. struct ceph_mds_client *mdsc = fsc->mdsc;
  208. struct ceph_mds_request *req;
  209. struct dentry *dn;
  210. struct ceph_acls_info acls = {};
  211. int err;
  212. dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
  213. dir, dentry, dentry,
  214. d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
  215. if (dentry->d_name.len > NAME_MAX)
  216. return -ENAMETOOLONG;
  217. err = ceph_init_dentry(dentry);
  218. if (err < 0)
  219. return err;
  220. if (flags & O_CREAT) {
  221. err = ceph_pre_init_acls(dir, &mode, &acls);
  222. if (err < 0)
  223. return err;
  224. }
  225. /* do the open */
  226. req = prepare_open_request(dir->i_sb, flags, mode);
  227. if (IS_ERR(req)) {
  228. err = PTR_ERR(req);
  229. goto out_acl;
  230. }
  231. req->r_dentry = dget(dentry);
  232. req->r_num_caps = 2;
  233. if (flags & O_CREAT) {
  234. req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
  235. req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
  236. if (acls.pagelist) {
  237. req->r_pagelist = acls.pagelist;
  238. acls.pagelist = NULL;
  239. }
  240. }
  241. req->r_locked_dir = dir; /* caller holds dir->i_mutex */
  242. err = ceph_mdsc_do_request(mdsc,
  243. (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
  244. req);
  245. err = ceph_handle_snapdir(req, dentry, err);
  246. if (err)
  247. goto out_req;
  248. if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
  249. err = ceph_handle_notrace_create(dir, dentry);
  250. if (d_unhashed(dentry)) {
  251. dn = ceph_finish_lookup(req, dentry, err);
  252. if (IS_ERR(dn))
  253. err = PTR_ERR(dn);
  254. } else {
  255. /* we were given a hashed negative dentry */
  256. dn = NULL;
  257. }
  258. if (err)
  259. goto out_req;
  260. if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
  261. /* make vfs retry on splice, ENOENT, or symlink */
  262. dout("atomic_open finish_no_open on dn %p\n", dn);
  263. err = finish_no_open(file, dn);
  264. } else {
  265. dout("atomic_open finish_open on dn %p\n", dn);
  266. if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
  267. ceph_init_inode_acls(d_inode(dentry), &acls);
  268. *opened |= FILE_CREATED;
  269. }
  270. err = finish_open(file, dentry, ceph_open, opened);
  271. }
  272. out_req:
  273. if (!req->r_err && req->r_target_inode)
  274. ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
  275. ceph_mdsc_put_request(req);
  276. out_acl:
  277. ceph_release_acls_info(&acls);
  278. dout("atomic_open result=%d\n", err);
  279. return err;
  280. }
  281. int ceph_release(struct inode *inode, struct file *file)
  282. {
  283. struct ceph_inode_info *ci = ceph_inode(inode);
  284. struct ceph_file_info *cf = file->private_data;
  285. dout("release inode %p file %p\n", inode, file);
  286. ceph_put_fmode(ci, cf->fmode);
  287. if (cf->last_readdir)
  288. ceph_mdsc_put_request(cf->last_readdir);
  289. kfree(cf->last_name);
  290. kfree(cf->dir_info);
  291. dput(cf->dentry);
  292. kmem_cache_free(ceph_file_cachep, cf);
  293. /* wake up anyone waiting for caps on this inode */
  294. wake_up_all(&ci->i_cap_wq);
  295. return 0;
  296. }
  297. enum {
  298. CHECK_EOF = 1,
  299. READ_INLINE = 2,
  300. };
  301. /*
  302. * Read a range of bytes striped over one or more objects. Iterate over
  303. * objects we stripe over. (That's not atomic, but good enough for now.)
  304. *
  305. * If we get a short result from the OSD, check against i_size; we need to
  306. * only return a short read to the caller if we hit EOF.
  307. */
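/*
 * Worked example, assuming the default file layout (4 MB objects,
 * stripe_count 1): a 6 MB read starting at offset 3 MB becomes three OSD
 * reads of 1 MB, 4 MB and 1 MB. ceph_osdc_readpages() clamps each request
 * at the object boundary (hit_stripe below), and the "more:" loop keeps
 * issuing reads until the requested bytes are satisfied or EOF is hit.
 */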
  308. static int striped_read(struct inode *inode,
  309. u64 off, u64 len,
  310. struct page **pages, int num_pages,
  311. int *checkeof, bool o_direct,
  312. unsigned long buf_align)
  313. {
  314. struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
  315. struct ceph_inode_info *ci = ceph_inode(inode);
  316. u64 pos, this_len, left;
  317. int io_align, page_align;
  318. int pages_left;
  319. int read;
  320. struct page **page_pos;
  321. int ret;
  322. bool hit_stripe, was_short;
  323. /*
  324. * we may need to do multiple reads. not atomic, unfortunately.
  325. */
  326. pos = off;
  327. left = len;
  328. page_pos = pages;
  329. pages_left = num_pages;
  330. read = 0;
  331. io_align = off & ~PAGE_MASK;
  332. more:
  333. if (o_direct)
  334. page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
  335. else
  336. page_align = pos & ~PAGE_MASK;
  337. this_len = left;
  338. ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
  339. &ci->i_layout, pos, &this_len,
  340. ci->i_truncate_seq,
  341. ci->i_truncate_size,
  342. page_pos, pages_left, page_align);
  343. if (ret == -ENOENT)
  344. ret = 0;
  345. hit_stripe = this_len < left;
  346. was_short = ret >= 0 && ret < this_len;
  347. dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
  348. ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
  349. if (ret >= 0) {
  350. int didpages;
  351. if (was_short && (pos + ret < inode->i_size)) {
  352. int zlen = min(this_len - ret,
  353. inode->i_size - pos - ret);
  354. int zoff = (o_direct ? buf_align : io_align) +
  355. read + ret;
  356. dout(" zero gap %llu to %llu\n",
  357. pos + ret, pos + ret + zlen);
  358. ceph_zero_page_vector_range(zoff, zlen, pages);
  359. ret += zlen;
  360. }
  361. didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
  362. pos += ret;
  363. read = pos - off;
  364. left -= ret;
  365. page_pos += didpages;
  366. pages_left -= didpages;
  367. /* hit a stripe boundary, need to continue */
  368. if (left && hit_stripe && pos < inode->i_size)
  369. goto more;
  370. }
  371. if (read > 0) {
  372. ret = read;
  373. /* did we bounce off eof? */
  374. if (pos + left > inode->i_size)
  375. *checkeof = CHECK_EOF;
  376. }
  377. dout("striped_read returns %d\n", ret);
  378. return ret;
  379. }
  380. /*
  381. * Completely synchronous read and write methods. Direct from __user
  382. * buffer to osd, or directly to user pages (if O_DIRECT).
  383. *
  384. * If the read spans object boundary, just do multiple reads.
  385. */
  386. static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
  387. int *checkeof)
  388. {
  389. struct file *file = iocb->ki_filp;
  390. struct inode *inode = file_inode(file);
  391. struct page **pages;
  392. u64 off = iocb->ki_pos;
  393. int num_pages, ret;
  394. size_t len = iov_iter_count(i);
  395. dout("sync_read on file %p %llu~%u %s\n", file, off,
  396. (unsigned)len,
  397. (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
  398. if (!len)
  399. return 0;
  400. /*
  401. * flush any page cache pages in this range. this
  402. * will make concurrent normal and sync io slow,
  403. * but it will at least behave sensibly when they are
  404. * in sequence.
  405. */
  406. ret = filemap_write_and_wait_range(inode->i_mapping, off,
  407. off + len);
  408. if (ret < 0)
  409. return ret;
  410. if (iocb->ki_flags & IOCB_DIRECT) {
  411. while (iov_iter_count(i)) {
  412. size_t start;
  413. ssize_t n;
  414. n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start);
  415. if (n < 0)
  416. return n;
  417. num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
  418. ret = striped_read(inode, off, n,
  419. pages, num_pages, checkeof,
  420. 1, start);
  421. ceph_put_page_vector(pages, num_pages, true);
  422. if (ret <= 0)
  423. break;
  424. off += ret;
  425. iov_iter_advance(i, ret);
  426. if (ret < n)
  427. break;
  428. }
  429. } else {
  430. num_pages = calc_pages_for(off, len);
  431. pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
  432. if (IS_ERR(pages))
  433. return PTR_ERR(pages);
  434. ret = striped_read(inode, off, len, pages,
  435. num_pages, checkeof, 0, 0);
  436. if (ret > 0) {
  437. int l, k = 0;
  438. size_t left = ret;
  439. while (left) {
  440. size_t page_off = off & ~PAGE_MASK;
  441. size_t copy = min_t(size_t,
  442. PAGE_SIZE - page_off, left);
  443. l = copy_page_to_iter(pages[k++], page_off,
  444. copy, i);
  445. off += l;
  446. left -= l;
  447. if (l < copy)
  448. break;
  449. }
  450. }
  451. ceph_release_page_vector(pages, num_pages);
  452. }
  453. if (off > iocb->ki_pos) {
  454. ret = off - iocb->ki_pos;
  455. iocb->ki_pos = off;
  456. }
  457. dout("sync_read result %d\n", ret);
  458. return ret;
  459. }
  460. /*
  461. * Write commit request unsafe callback, called to tell us when a
  462. * request is unsafe (that is, in flight--has been handed to the
  463. * messenger to send to its target osd). It is called again when
  464. * we've received a response message indicating the request is
  465. * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
  466. * is completed early (and unsuccessfully) due to a timeout or
  467. * interrupt.
  468. *
  469. * This is used if we requested both an ACK and ONDISK commit reply
  470. * from the OSD.
  471. */
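/*
 * Note that while a write is "unsafe" (sent but not yet committed to
 * disk) a CEPH_CAP_FILE_WR reference is held and the request sits on
 * ci->i_unsafe_writes, so the Fw cap cannot be released with
 * uncommitted data still in flight.
 */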
  472. static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
  473. {
  474. struct ceph_inode_info *ci = ceph_inode(req->r_inode);
  475. dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
  476. unsafe ? "un" : "");
  477. if (unsafe) {
  478. ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
  479. spin_lock(&ci->i_unsafe_lock);
  480. list_add_tail(&req->r_unsafe_item,
  481. &ci->i_unsafe_writes);
  482. spin_unlock(&ci->i_unsafe_lock);
  483. } else {
  484. spin_lock(&ci->i_unsafe_lock);
  485. list_del_init(&req->r_unsafe_item);
  486. spin_unlock(&ci->i_unsafe_lock);
  487. ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
  488. }
  489. }
  490. /*
  491. * Synchronous write, straight from __user pointer or user pages.
  492. *
  493. * If write spans object boundary, just do multiple writes. (For a
  494. * correct atomic write, we should e.g. take write locks on all
  495. * objects, rollback on failure, etc.)
  496. */
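/*
 * Each pass through the loop below covers at most one object:
 * ceph_osdc_new_request() trims the supplied length at the object
 * boundary, so a write that spans objects simply takes several
 * iterations.
 */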
  497. static ssize_t
  498. ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
  499. {
  500. struct file *file = iocb->ki_filp;
  501. struct inode *inode = file_inode(file);
  502. struct ceph_inode_info *ci = ceph_inode(inode);
  503. struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
  504. struct ceph_snap_context *snapc;
  505. struct ceph_vino vino;
  506. struct ceph_osd_request *req;
  507. struct page **pages;
  508. int num_pages;
  509. int written = 0;
  510. int flags;
  511. int check_caps = 0;
  512. int ret;
  513. struct timespec mtime = CURRENT_TIME;
  514. size_t count = iov_iter_count(from);
  515. if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
  516. return -EROFS;
  517. dout("sync_direct_write on file %p %lld~%u\n", file, pos,
  518. (unsigned)count);
  519. ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
  520. if (ret < 0)
  521. return ret;
  522. ret = invalidate_inode_pages2_range(inode->i_mapping,
  523. pos >> PAGE_CACHE_SHIFT,
  524. (pos + count) >> PAGE_CACHE_SHIFT);
  525. if (ret < 0)
  526. dout("invalidate_inode_pages2_range returned %d\n", ret);
  527. flags = CEPH_OSD_FLAG_ORDERSNAP |
  528. CEPH_OSD_FLAG_ONDISK |
  529. CEPH_OSD_FLAG_WRITE;
  530. while (iov_iter_count(from) > 0) {
  531. u64 len = iov_iter_single_seg_count(from);
  532. size_t start;
  533. ssize_t n;
  534. snapc = ci->i_snap_realm->cached_context;
  535. vino = ceph_vino(inode);
  536. req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
  537. vino, pos, &len, 0,
  538. 2, /* include a 'startsync' command */
  539. CEPH_OSD_OP_WRITE, flags, snapc,
  540. ci->i_truncate_seq,
  541. ci->i_truncate_size,
  542. false);
  543. if (IS_ERR(req)) {
  544. ret = PTR_ERR(req);
  545. break;
  546. }
  547. osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
  548. n = iov_iter_get_pages_alloc(from, &pages, len, &start);
  549. if (unlikely(n < 0)) {
  550. ret = n;
  551. ceph_osdc_put_request(req);
  552. break;
  553. }
  554. num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
  555. /*
  556. * throw out any page cache pages in this range. this
  557. * may block.
  558. */
  559. truncate_inode_pages_range(inode->i_mapping, pos,
  560. (pos+n) | (PAGE_CACHE_SIZE-1));
  561. osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
  562. false, false);
  563. /* BUG_ON(vino.snap != CEPH_NOSNAP); */
  564. ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
  565. ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
  566. if (!ret)
  567. ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
  568. ceph_put_page_vector(pages, num_pages, false);
  569. ceph_osdc_put_request(req);
  570. if (ret)
  571. break;
  572. pos += n;
  573. written += n;
  574. iov_iter_advance(from, n);
  575. if (pos > i_size_read(inode)) {
  576. check_caps = ceph_inode_set_size(inode, pos);
  577. if (check_caps)
  578. ceph_check_caps(ceph_inode(inode),
  579. CHECK_CAPS_AUTHONLY,
  580. NULL);
  581. }
  582. }
  583. if (ret != -EOLDSNAPC && written > 0) {
  584. iocb->ki_pos = pos;
  585. ret = written;
  586. }
  587. return ret;
  588. }
  589. /*
  590. * Synchronous write, straight from __user pointer or user pages.
  591. *
  592. * If write spans object boundary, just do multiple writes. (For a
  593. * correct atomic write, we should e.g. take write locks on all
  594. * objects, rollback on failure, etc.)
  595. */
  596. static ssize_t
  597. ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
  598. {
  599. struct file *file = iocb->ki_filp;
  600. struct inode *inode = file_inode(file);
  601. struct ceph_inode_info *ci = ceph_inode(inode);
  602. struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
  603. struct ceph_snap_context *snapc;
  604. struct ceph_vino vino;
  605. struct ceph_osd_request *req;
  606. struct page **pages;
  607. u64 len;
  608. int num_pages;
  609. int written = 0;
  610. int flags;
  611. int check_caps = 0;
  612. int ret;
  613. struct timespec mtime = CURRENT_TIME;
  614. size_t count = iov_iter_count(from);
  615. if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
  616. return -EROFS;
  617. dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
  618. ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
  619. if (ret < 0)
  620. return ret;
  621. ret = invalidate_inode_pages2_range(inode->i_mapping,
  622. pos >> PAGE_CACHE_SHIFT,
  623. (pos + count) >> PAGE_CACHE_SHIFT);
  624. if (ret < 0)
  625. dout("invalidate_inode_pages2_range returned %d\n", ret);
  626. flags = CEPH_OSD_FLAG_ORDERSNAP |
  627. CEPH_OSD_FLAG_ONDISK |
  628. CEPH_OSD_FLAG_WRITE |
  629. CEPH_OSD_FLAG_ACK;
  630. while ((len = iov_iter_count(from)) > 0) {
  631. size_t left;
  632. int n;
  633. snapc = ci->i_snap_realm->cached_context;
  634. vino = ceph_vino(inode);
  635. req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
  636. vino, pos, &len, 0, 1,
  637. CEPH_OSD_OP_WRITE, flags, snapc,
  638. ci->i_truncate_seq,
  639. ci->i_truncate_size,
  640. false);
  641. if (IS_ERR(req)) {
  642. ret = PTR_ERR(req);
  643. break;
  644. }
  645. /*
  646. * write from beginning of first page,
  647. * regardless of io alignment
  648. */
  649. num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
  650. pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
  651. if (IS_ERR(pages)) {
  652. ret = PTR_ERR(pages);
  653. goto out;
  654. }
  655. left = len;
  656. for (n = 0; n < num_pages; n++) {
  657. size_t plen = min_t(size_t, left, PAGE_SIZE);
  658. ret = copy_page_from_iter(pages[n], 0, plen, from);
  659. if (ret != plen) {
  660. ret = -EFAULT;
  661. break;
  662. }
  663. left -= ret;
  664. }
  665. if (ret < 0) {
  666. ceph_release_page_vector(pages, num_pages);
  667. goto out;
  668. }
  669. /* get a second commit callback */
  670. req->r_unsafe_callback = ceph_sync_write_unsafe;
  671. req->r_inode = inode;
  672. osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
  673. false, true);
  674. /* BUG_ON(vino.snap != CEPH_NOSNAP); */
  675. ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
  676. ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
  677. if (!ret)
  678. ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
  679. out:
  680. ceph_osdc_put_request(req);
  681. if (ret == 0) {
  682. pos += len;
  683. written += len;
  684. if (pos > i_size_read(inode)) {
  685. check_caps = ceph_inode_set_size(inode, pos);
  686. if (check_caps)
  687. ceph_check_caps(ceph_inode(inode),
  688. CHECK_CAPS_AUTHONLY,
  689. NULL);
  690. }
  691. } else
  692. break;
  693. }
  694. if (ret != -EOLDSNAPC && written > 0) {
  695. ret = written;
  696. iocb->ki_pos = pos;
  697. }
  698. return ret;
  699. }
  700. /*
  701. * Wrap generic_file_read_iter with checks for cap bits on the inode.
  702. * Atomically grab references, so that those bits are not released
  703. * back to the MDS mid-read.
  704. *
  705. * Hmm, the sync read case isn't actually async... should it be?
  706. */
  707. static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
  708. {
  709. struct file *filp = iocb->ki_filp;
  710. struct ceph_file_info *fi = filp->private_data;
  711. size_t len = iov_iter_count(to);
  712. struct inode *inode = file_inode(filp);
  713. struct ceph_inode_info *ci = ceph_inode(inode);
  714. struct page *pinned_page = NULL;
  715. ssize_t ret;
  716. int want, got = 0;
  717. int retry_op = 0, read = 0;
  718. again:
  719. dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
  720. inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
  721. if (fi->fmode & CEPH_FILE_MODE_LAZY)
  722. want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
  723. else
  724. want = CEPH_CAP_FILE_CACHE;
  725. ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
  726. if (ret < 0)
  727. return ret;
  728. if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
  729. (iocb->ki_flags & IOCB_DIRECT) ||
  730. (fi->flags & CEPH_F_SYNC)) {
  731. dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
  732. inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
  733. ceph_cap_string(got));
  734. if (ci->i_inline_version == CEPH_INLINE_NONE) {
  735. /* hmm, this isn't really async... */
  736. ret = ceph_sync_read(iocb, to, &retry_op);
  737. } else {
  738. retry_op = READ_INLINE;
  739. }
  740. } else {
  741. dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
  742. inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
  743. ceph_cap_string(got));
  744. ret = generic_file_read_iter(iocb, to);
  745. }
  746. dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
  747. inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
  748. if (pinned_page) {
  749. page_cache_release(pinned_page);
  750. pinned_page = NULL;
  751. }
  752. ceph_put_cap_refs(ci, got);
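/*
 * Two conditions can force a retry: READ_INLINE means the file data is
 * stored inline in the MDS and has to be fetched with a getattr request;
 * CHECK_EOF means the sync read came back short and we must distinguish
 * a hole (missing object) from a true EOF.
 */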
  753. if (retry_op && ret >= 0) {
  754. int statret;
  755. struct page *page = NULL;
  756. loff_t i_size;
  757. if (retry_op == READ_INLINE) {
  758. page = __page_cache_alloc(GFP_NOFS);
  759. if (!page)
  760. return -ENOMEM;
  761. }
  762. statret = __ceph_do_getattr(inode, page,
  763. CEPH_STAT_CAP_INLINE_DATA, !!page);
  764. if (statret < 0) {
  765. if (page) __free_page(page);
  766. if (statret == -ENODATA) {
  767. BUG_ON(retry_op != READ_INLINE);
  768. goto again;
  769. }
  770. return statret;
  771. }
  772. i_size = i_size_read(inode);
  773. if (retry_op == READ_INLINE) {
  774. BUG_ON(ret > 0 || read > 0);
  775. if (iocb->ki_pos < i_size &&
  776. iocb->ki_pos < PAGE_CACHE_SIZE) {
  777. loff_t end = min_t(loff_t, i_size,
  778. iocb->ki_pos + len);
  779. end = min_t(loff_t, end, PAGE_CACHE_SIZE);
  780. if (statret < end)
  781. zero_user_segment(page, statret, end);
  782. ret = copy_page_to_iter(page,
  783. iocb->ki_pos & ~PAGE_MASK,
  784. end - iocb->ki_pos, to);
  785. iocb->ki_pos += ret;
  786. read += ret;
  787. }
  788. if (iocb->ki_pos < i_size && read < len) {
  789. size_t zlen = min_t(size_t, len - read,
  790. i_size - iocb->ki_pos);
  791. ret = iov_iter_zero(zlen, to);
  792. iocb->ki_pos += ret;
  793. read += ret;
  794. }
  795. __free_pages(page, 0);
  796. return read;
  797. }
  798. /* hit EOF or hole? */
  799. if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
  800. ret < len) {
  801. dout("sync_read hit hole, ppos %lld < size %lld"
  802. ", reading more\n", iocb->ki_pos,
  803. inode->i_size);
  804. read += ret;
  805. len -= ret;
  806. retry_op = 0;
  807. goto again;
  808. }
  809. }
  810. if (ret >= 0)
  811. ret += read;
  812. return ret;
  813. }
  814. /*
  815. * Take cap references to avoid releasing caps to MDS mid-write.
  816. *
  817. * If we are synchronous, and write with an old snap context, the OSD
  818. * may return EOLDSNAPC. In that case, retry the write _after_
  819. * dropping our cap refs and allowing the pending snap to logically
  820. * complete _before_ this write occurs.
  821. *
  822. * If we are near ENOSPC, write synchronously.
  823. */
  824. static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
  825. {
  826. struct file *file = iocb->ki_filp;
  827. struct ceph_file_info *fi = file->private_data;
  828. struct inode *inode = file_inode(file);
  829. struct ceph_inode_info *ci = ceph_inode(inode);
  830. struct ceph_osd_client *osdc =
  831. &ceph_sb_to_client(inode->i_sb)->client->osdc;
  832. ssize_t count, written = 0;
  833. int err, want, got;
  834. loff_t pos;
  835. if (ceph_snap(inode) != CEPH_NOSNAP)
  836. return -EROFS;
  837. mutex_lock(&inode->i_mutex);
  838. /* We can write back this queue in page reclaim */
  839. current->backing_dev_info = inode_to_bdi(inode);
  840. err = generic_write_checks(iocb, from);
  841. if (err <= 0)
  842. goto out;
  843. pos = iocb->ki_pos;
  844. count = iov_iter_count(from);
  845. err = file_remove_suid(file);
  846. if (err)
  847. goto out;
  848. err = file_update_time(file);
  849. if (err)
  850. goto out;
  851. if (ci->i_inline_version != CEPH_INLINE_NONE) {
  852. err = ceph_uninline_data(file, NULL);
  853. if (err < 0)
  854. goto out;
  855. }
  856. retry_snap:
  857. if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
  858. err = -ENOSPC;
  859. goto out;
  860. }
  861. dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
  862. inode, ceph_vinop(inode), pos, count, inode->i_size);
  863. if (fi->fmode & CEPH_FILE_MODE_LAZY)
  864. want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
  865. else
  866. want = CEPH_CAP_FILE_BUFFER;
  867. got = 0;
  868. err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
  869. &got, NULL);
  870. if (err < 0)
  871. goto out;
  872. dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
  873. inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
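/*
 * Without the Fb (buffer) cap, or for O_DIRECT / CEPH_F_SYNC writes,
 * send the data synchronously to the OSDs; otherwise fall through to
 * the buffered path via generic_perform_write() below.
 */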
  874. if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
  875. (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
  876. struct iov_iter data;
  877. mutex_unlock(&inode->i_mutex);
  878. /* we might need to revert back to that point */
  879. data = *from;
  880. if (iocb->ki_flags & IOCB_DIRECT)
  881. written = ceph_sync_direct_write(iocb, &data, pos);
  882. else
  883. written = ceph_sync_write(iocb, &data, pos);
  884. if (written == -EOLDSNAPC) {
  885. dout("aio_write %p %llx.%llx %llu~%u"
  886. "got EOLDSNAPC, retrying\n",
  887. inode, ceph_vinop(inode),
  888. pos, (unsigned)count);
  889. mutex_lock(&inode->i_mutex);
  890. goto retry_snap;
  891. }
  892. if (written > 0)
  893. iov_iter_advance(from, written);
  894. } else {
  895. loff_t old_size = inode->i_size;
  896. /*
  897. * No need to acquire the i_truncate_mutex: the MDS revokes the
  898. * Fwb caps before sending a truncate message to us, and we
  899. * cannot regain the Fwb cap while a vmtruncate is pending,
  900. * so write and vmtruncate cannot run
  901. * at the same time.
  902. */
  903. written = generic_perform_write(file, from, pos);
  904. if (likely(written >= 0))
  905. iocb->ki_pos = pos + written;
  906. if (inode->i_size > old_size)
  907. ceph_fscache_update_objectsize(inode);
  908. mutex_unlock(&inode->i_mutex);
  909. }
  910. if (written >= 0) {
  911. int dirty;
  912. spin_lock(&ci->i_ceph_lock);
  913. ci->i_inline_version = CEPH_INLINE_NONE;
  914. dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
  915. spin_unlock(&ci->i_ceph_lock);
  916. if (dirty)
  917. __mark_inode_dirty(inode, dirty);
  918. }
  919. dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
  920. inode, ceph_vinop(inode), pos, (unsigned)count,
  921. ceph_cap_string(got));
  922. ceph_put_cap_refs(ci, got);
  923. if (written >= 0 &&
  924. ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
  925. ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
  926. err = vfs_fsync_range(file, pos, pos + written - 1, 1);
  927. if (err < 0)
  928. written = err;
  929. }
  930. goto out_unlocked;
  931. out:
  932. mutex_unlock(&inode->i_mutex);
  933. out_unlocked:
  934. current->backing_dev_info = NULL;
  935. return written ? written : err;
  936. }
  937. /*
  938. * llseek. be sure to verify file size on SEEK_END.
  939. */
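/*
 * Note: SEEK_DATA/SEEK_HOLE below treat the file as a single allocated
 * extent; data is assumed to run from 0 to i_size and the only hole
 * reported is the implicit one at EOF.
 */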
  940. static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
  941. {
  942. struct inode *inode = file->f_mapping->host;
  943. int ret;
  944. mutex_lock(&inode->i_mutex);
  945. if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
  946. ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
  947. if (ret < 0) {
  948. offset = ret;
  949. goto out;
  950. }
  951. }
  952. switch (whence) {
  953. case SEEK_END:
  954. offset += inode->i_size;
  955. break;
  956. case SEEK_CUR:
  957. /*
  958. * Here we special-case the lseek(fd, 0, SEEK_CUR)
  959. * position-querying operation. Avoid rewriting the "same"
  960. * f_pos value back to the file because a concurrent read(),
  961. * write() or lseek() might have altered it
  962. */
  963. if (offset == 0) {
  964. offset = file->f_pos;
  965. goto out;
  966. }
  967. offset += file->f_pos;
  968. break;
  969. case SEEK_DATA:
  970. if (offset >= inode->i_size) {
  971. ret = -ENXIO;
  972. goto out;
  973. }
  974. break;
  975. case SEEK_HOLE:
  976. if (offset >= inode->i_size) {
  977. ret = -ENXIO;
  978. goto out;
  979. }
  980. offset = inode->i_size;
  981. break;
  982. }
  983. offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
  984. out:
  985. mutex_unlock(&inode->i_mutex);
  986. return offset;
  987. }
  988. static inline void ceph_zero_partial_page(
  989. struct inode *inode, loff_t offset, unsigned size)
  990. {
  991. struct page *page;
  992. pgoff_t index = offset >> PAGE_CACHE_SHIFT;
  993. page = find_lock_page(inode->i_mapping, index);
  994. if (page) {
  995. wait_on_page_writeback(page);
  996. zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
  997. unlock_page(page);
  998. page_cache_release(page);
  999. }
  1000. }
  1001. static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
  1002. loff_t length)
  1003. {
  1004. loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
  1005. if (offset < nearly) {
  1006. loff_t size = nearly - offset;
  1007. if (length < size)
  1008. size = length;
  1009. ceph_zero_partial_page(inode, offset, size);
  1010. offset += size;
  1011. length -= size;
  1012. }
  1013. if (length >= PAGE_CACHE_SIZE) {
  1014. loff_t size = round_down(length, PAGE_CACHE_SIZE);
  1015. truncate_pagecache_range(inode, offset, offset + size - 1);
  1016. offset += size;
  1017. length -= size;
  1018. }
  1019. if (length)
  1020. ceph_zero_partial_page(inode, offset, length);
  1021. }
  1022. static int ceph_zero_partial_object(struct inode *inode,
  1023. loff_t offset, loff_t *length)
  1024. {
  1025. struct ceph_inode_info *ci = ceph_inode(inode);
  1026. struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
  1027. struct ceph_osd_request *req;
  1028. int ret = 0;
  1029. loff_t zero = 0;
  1030. int op;
  1031. if (!length) {
  1032. op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
  1033. length = &zero;
  1034. } else {
  1035. op = CEPH_OSD_OP_ZERO;
  1036. }
  1037. req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
  1038. ceph_vino(inode),
  1039. offset, length,
  1040. 0, 1, op,
  1041. CEPH_OSD_FLAG_WRITE |
  1042. CEPH_OSD_FLAG_ONDISK,
  1043. NULL, 0, 0, false);
  1044. if (IS_ERR(req)) {
  1045. ret = PTR_ERR(req);
  1046. goto out;
  1047. }
  1048. ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
  1049. &inode->i_mtime);
  1050. ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
  1051. if (!ret) {
  1052. ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
  1053. if (ret == -ENOENT)
  1054. ret = 0;
  1055. }
  1056. ceph_osdc_put_request(req);
  1057. out:
  1058. return ret;
  1059. }
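/*
 * ceph_zero_objects() zeroes a byte range one RADOS object at a time.
 * A "period" (object set) is object_size * stripe_count bytes. As a
 * worked example, with the default layout (4 MB objects, stripe_count 1)
 * punching a 10 MB hole at offset 5 MB zeroes the tail of the first
 * object (5-8 MB), deletes the next whole object (8-12 MB), and zeroes
 * the head of the last one (12-15 MB).
 */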
  1060. static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
  1061. {
  1062. int ret = 0;
  1063. struct ceph_inode_info *ci = ceph_inode(inode);
  1064. s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
  1065. s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
  1066. s32 object_size = ceph_file_layout_object_size(ci->i_layout);
  1067. u64 object_set_size = (u64)object_size * stripe_count;
  1068. u64 nearly, t;
  1069. /* round offset up to next period boundary */
  1070. nearly = offset + object_set_size - 1;
  1071. t = nearly;
  1072. nearly -= do_div(t, object_set_size);
  1073. while (length && offset < nearly) {
  1074. loff_t size = length;
  1075. ret = ceph_zero_partial_object(inode, offset, &size);
  1076. if (ret < 0)
  1077. return ret;
  1078. offset += size;
  1079. length -= size;
  1080. }
  1081. while (length >= object_set_size) {
  1082. int i;
  1083. loff_t pos = offset;
  1084. for (i = 0; i < stripe_count; ++i) {
  1085. ret = ceph_zero_partial_object(inode, pos, NULL);
  1086. if (ret < 0)
  1087. return ret;
  1088. pos += stripe_unit;
  1089. }
  1090. offset += object_set_size;
  1091. length -= object_set_size;
  1092. }
  1093. while (length) {
  1094. loff_t size = length;
  1095. ret = ceph_zero_partial_object(inode, offset, &size);
  1096. if (ret < 0)
  1097. return ret;
  1098. offset += size;
  1099. length -= size;
  1100. }
  1101. return ret;
  1102. }
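/*
 * fallocate(2) support is limited: only FALLOC_FL_PUNCH_HOLE (optionally
 * with FALLOC_FL_KEEP_SIZE) actually zeroes objects; a plain allocating
 * fallocate reserves no space and merely extends i_size when the new end
 * offset is beyond the current size.
 */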
  1103. static long ceph_fallocate(struct file *file, int mode,
  1104. loff_t offset, loff_t length)
  1105. {
  1106. struct ceph_file_info *fi = file->private_data;
  1107. struct inode *inode = file_inode(file);
  1108. struct ceph_inode_info *ci = ceph_inode(inode);
  1109. struct ceph_osd_client *osdc =
  1110. &ceph_inode_to_client(inode)->client->osdc;
  1111. int want, got = 0;
  1112. int dirty;
  1113. int ret = 0;
  1114. loff_t endoff = 0;
  1115. loff_t size;
  1116. if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
  1117. return -EOPNOTSUPP;
  1118. if (!S_ISREG(inode->i_mode))
  1119. return -EOPNOTSUPP;
  1120. mutex_lock(&inode->i_mutex);
  1121. if (ceph_snap(inode) != CEPH_NOSNAP) {
  1122. ret = -EROFS;
  1123. goto unlock;
  1124. }
  1125. if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
  1126. !(mode & FALLOC_FL_PUNCH_HOLE)) {
  1127. ret = -ENOSPC;
  1128. goto unlock;
  1129. }
  1130. if (ci->i_inline_version != CEPH_INLINE_NONE) {
  1131. ret = ceph_uninline_data(file, NULL);
  1132. if (ret < 0)
  1133. goto unlock;
  1134. }
  1135. size = i_size_read(inode);
  1136. if (!(mode & FALLOC_FL_KEEP_SIZE))
  1137. endoff = offset + length;
  1138. if (fi->fmode & CEPH_FILE_MODE_LAZY)
  1139. want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
  1140. else
  1141. want = CEPH_CAP_FILE_BUFFER;
  1142. ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
  1143. if (ret < 0)
  1144. goto unlock;
  1145. if (mode & FALLOC_FL_PUNCH_HOLE) {
  1146. if (offset < size)
  1147. ceph_zero_pagecache_range(inode, offset, length);
  1148. ret = ceph_zero_objects(inode, offset, length);
  1149. } else if (endoff > size) {
  1150. truncate_pagecache_range(inode, size, -1);
  1151. if (ceph_inode_set_size(inode, endoff))
  1152. ceph_check_caps(ceph_inode(inode),
  1153. CHECK_CAPS_AUTHONLY, NULL);
  1154. }
  1155. if (!ret) {
  1156. spin_lock(&ci->i_ceph_lock);
  1157. ci->i_inline_version = CEPH_INLINE_NONE;
  1158. dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
  1159. spin_unlock(&ci->i_ceph_lock);
  1160. if (dirty)
  1161. __mark_inode_dirty(inode, dirty);
  1162. }
  1163. ceph_put_cap_refs(ci, got);
  1164. unlock:
  1165. mutex_unlock(&inode->i_mutex);
  1166. return ret;
  1167. }
  1168. const struct file_operations ceph_file_fops = {
  1169. .open = ceph_open,
  1170. .release = ceph_release,
  1171. .llseek = ceph_llseek,
  1172. .read_iter = ceph_read_iter,
  1173. .write_iter = ceph_write_iter,
  1174. .mmap = ceph_mmap,
  1175. .fsync = ceph_fsync,
  1176. .lock = ceph_lock,
  1177. .flock = ceph_flock,
  1178. .splice_read = generic_file_splice_read,
  1179. .splice_write = iter_file_splice_write,
  1180. .unlocked_ioctl = ceph_ioctl,
  1181. .compat_ioctl = ceph_ioctl,
  1182. .fallocate = ceph_fallocate,
  1183. };