file.c

#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
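
/*
 * In this file: the buffered mode goes through ceph_read_iter() and
 * ceph_write_iter() into the generic VFS helpers; the synchronous mode
 * is ceph_sync_read() and ceph_sync_write(); direct I/O uses
 * ceph_sync_direct_write() and the O_DIRECT branch of ceph_sync_read().
 */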
/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
    struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
    struct ceph_mds_client *mdsc = fsc->mdsc;
    struct ceph_mds_request *req;
    int want_auth = USE_ANY_MDS;
    int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

    if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
        want_auth = USE_AUTH_MDS;

    req = ceph_mdsc_create_request(mdsc, op, want_auth);
    if (IS_ERR(req))
        goto out;
    req->r_fmode = ceph_flags_to_mode(flags);
    req->r_args.open.flags = cpu_to_le32(flags);
    req->r_args.open.mode = cpu_to_le32(create_mode);
out:
    return req;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
    struct ceph_file_info *cf;
    int ret = 0;
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
    struct ceph_mds_client *mdsc = fsc->mdsc;

    switch (inode->i_mode & S_IFMT) {
    case S_IFREG:
        /* The first file open request creates the fscache cookie; we
         * keep this cookie around for the lifetime of the inode so as
         * not to have to worry about fscache register / revoke /
         * operation races.
         *
         * Also, if we know the operation is going to invalidate data
         * (non readonly) just nuke the cache right away.
         */
        ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
        if ((fmode & CEPH_FILE_MODE_WR))
            ceph_fscache_invalidate(inode);
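        /* fall through: regular files and directories share the
         * ceph_file_info setup below */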
    case S_IFDIR:
        dout("init_file %p %p 0%o (regular)\n", inode, file,
             inode->i_mode);
        cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
        if (cf == NULL) {
            ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
            return -ENOMEM;
        }
        cf->fmode = fmode;
        cf->next_offset = 2;
        file->private_data = cf;
        BUG_ON(inode->i_fop->release != ceph_release);
        break;

    case S_IFLNK:
        dout("init_file %p %p 0%o (symlink)\n", inode, file,
             inode->i_mode);
        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
        break;

    default:
        dout("init_file %p %p 0%o (special)\n", inode, file,
             inode->i_mode);
        /*
         * we need to drop the open ref now, since we don't
         * have .release set to ceph_release.
         */
        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
        BUG_ON(inode->i_fop->release == ceph_release);

        /* call the proper open fop */
        ret = inode->i_fop->open(inode, file);
    }
    return ret;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
    struct ceph_mds_client *mdsc = fsc->mdsc;
    struct ceph_mds_request *req;
    struct ceph_file_info *cf = file->private_data;
    struct inode *parent_inode = NULL;
    int err;
    int flags, fmode, wanted;

    if (cf) {
        dout("open file %p is already opened\n", file);
        return 0;
    }

    /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
    flags = file->f_flags & ~(O_CREAT|O_EXCL);
    if (S_ISDIR(inode->i_mode))
        flags = O_DIRECTORY; /* mds likes to know */

    dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
         ceph_vinop(inode), file, flags, file->f_flags);
    fmode = ceph_flags_to_mode(flags);
    wanted = ceph_caps_for_mode(fmode);
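    /* ceph_caps_for_mode() maps the open mode to the cap bits this
     * client would like to hold; roughly (an assumption about that
     * helper, which lives elsewhere): read modes want FILE_RD plus
     * FILE_CACHE, write modes want FILE_WR plus FILE_BUFFER. */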
    /* snapped files are read-only */
    if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
        return -EROFS;

    /* trivially open snapdir */
    if (ceph_snap(inode) == CEPH_SNAPDIR) {
        spin_lock(&ci->i_ceph_lock);
        __ceph_get_fmode(ci, fmode);
        spin_unlock(&ci->i_ceph_lock);
        return ceph_init_file(inode, file, fmode);
    }

    /*
     * No need to block if we have caps on the auth MDS (for
     * write) or any MDS (for read). Update wanted set
     * asynchronously.
     */
    spin_lock(&ci->i_ceph_lock);
    if (__ceph_is_any_real_caps(ci) &&
        (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
        int mds_wanted = __ceph_caps_mds_wanted(ci);
        int issued = __ceph_caps_issued(ci, NULL);

        dout("open %p fmode %d want %s issued %s using existing\n",
             inode, fmode, ceph_cap_string(wanted),
             ceph_cap_string(issued));
        __ceph_get_fmode(ci, fmode);
        spin_unlock(&ci->i_ceph_lock);

        /* adjust wanted? */
        if ((issued & wanted) != wanted &&
            (mds_wanted & wanted) != wanted &&
            ceph_snap(inode) != CEPH_SNAPDIR)
            ceph_check_caps(ci, 0, NULL);

        return ceph_init_file(inode, file, fmode);
    } else if (ceph_snap(inode) != CEPH_NOSNAP &&
               (ci->i_snap_caps & wanted) == wanted) {
        __ceph_get_fmode(ci, fmode);
        spin_unlock(&ci->i_ceph_lock);
        return ceph_init_file(inode, file, fmode);
    }

    spin_unlock(&ci->i_ceph_lock);

    dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
    req = prepare_open_request(inode->i_sb, flags, 0);
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto out;
    }
    req->r_inode = inode;
    ihold(inode);

    req->r_num_caps = 1;
    if (flags & O_CREAT)
        parent_inode = ceph_get_dentry_parent_inode(file->f_path.dentry);
    err = ceph_mdsc_do_request(mdsc, parent_inode, req);
    iput(parent_inode);
    if (!err)
        err = ceph_init_file(inode, file, req->r_fmode);
    ceph_mdsc_put_request(req);
    dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
    return err;
}
/*
 * Do a lookup + open with a single request. If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode,
                     int *opened)
{
    struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
    struct ceph_mds_client *mdsc = fsc->mdsc;
    struct ceph_mds_request *req;
    struct dentry *dn;
    struct ceph_acls_info acls = {};
    int err;

    dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
         dir, dentry, dentry,
         d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

    if (dentry->d_name.len > NAME_MAX)
        return -ENAMETOOLONG;

    err = ceph_init_dentry(dentry);
    if (err < 0)
        return err;

    if (flags & O_CREAT) {
        err = ceph_pre_init_acls(dir, &mode, &acls);
        if (err < 0)
            return err;
    }

    /* do the open */
    req = prepare_open_request(dir->i_sb, flags, mode);
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto out_acl;
    }
    req->r_dentry = dget(dentry);
    req->r_num_caps = 2;
    if (flags & O_CREAT) {
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (acls.pagelist) {
            req->r_pagelist = acls.pagelist;
            acls.pagelist = NULL;
        }
    }
    req->r_locked_dir = dir; /* caller holds dir->i_mutex */
    err = ceph_mdsc_do_request(mdsc,
                               (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                               req);
    if (err)
        goto out_req;

    err = ceph_handle_snapdir(req, dentry, err);
    if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
        err = ceph_handle_notrace_create(dir, dentry);

    if (d_unhashed(dentry)) {
        dn = ceph_finish_lookup(req, dentry, err);
        if (IS_ERR(dn))
            err = PTR_ERR(dn);
    } else {
        /* we were given a hashed negative dentry */
        dn = NULL;
    }
    if (err)
        goto out_req;
    if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
        /* make vfs retry on splice, ENOENT, or symlink */
        dout("atomic_open finish_no_open on dn %p\n", dn);
        err = finish_no_open(file, dn);
    } else {
        dout("atomic_open finish_open on dn %p\n", dn);
        if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
            ceph_init_inode_acls(dentry->d_inode, &acls);
            *opened |= FILE_CREATED;
        }
        err = finish_open(file, dentry, ceph_open, opened);
    }
out_req:
    if (!req->r_err && req->r_target_inode)
        ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
    ceph_mdsc_put_request(req);
out_acl:
    ceph_release_acls_info(&acls);
    dout("atomic_open result=%d\n", err);
    return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_file_info *cf = file->private_data;

    dout("release inode %p file %p\n", inode, file);
    ceph_put_fmode(ci, cf->fmode);
    if (cf->last_readdir)
        ceph_mdsc_put_request(cf->last_readdir);
    kfree(cf->last_name);
    kfree(cf->dir_info);
    dput(cf->dentry);
    kmem_cache_free(ceph_file_cachep, cf);

    /* wake up anyone waiting for caps on this inode */
    wake_up_all(&ci->i_cap_wq);
    return 0;
}
enum {
    CHECK_EOF = 1,
    READ_INLINE = 2,
};
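
/*
 * These are the retry_op codes used by ceph_read_iter() below:
 * striped_read() reports CHECK_EOF when a short read may just mean we
 * raced with a truncate or hit EOF, and ceph_read_iter() sets
 * READ_INLINE when the file's data is inlined in the MDS and must be
 * fetched with a getattr instead of an OSD read.
 */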
/*
 * Read a range of bytes striped over one or more objects. Iterate over
 * objects we stripe over. (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
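/*
 * For example, assuming the common default layout (4 MB objects with
 * stripe_count 1 -- not something this function can rely on), a 1 MB
 * read starting 512 KB before an object boundary takes two OSD reads:
 * the first is truncated to a short this_len, hit_stripe is set, and
 * we loop back to 'more' for the remainder in the next object.
 */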
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof, bool o_direct,
                        unsigned long buf_align)
{
    struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
    struct ceph_inode_info *ci = ceph_inode(inode);
    u64 pos, this_len, left;
    int io_align, page_align;
    int pages_left;
    int read;
    struct page **page_pos;
    int ret;
    bool hit_stripe, was_short;

    /*
     * we may need to do multiple reads. not atomic, unfortunately.
     */
    pos = off;
    left = len;
    page_pos = pages;
    pages_left = num_pages;
    read = 0;
    io_align = off & ~PAGE_MASK;

more:
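    /* Compute where the data lands within the first page of the
     * vector. For buffered reads the pages are file-offset aligned, so
     * it is simply pos within its page. For O_DIRECT the pages are the
     * user's buffer pages, so the in-page offset follows the buffer's
     * own alignment (buf_align) advanced by whatever we've read so far
     * (pos - off; io_align is off's offset within its page). */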
    if (o_direct)
        page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
    else
        page_align = pos & ~PAGE_MASK;
    this_len = left;
    ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                              &ci->i_layout, pos, &this_len,
                              ci->i_truncate_seq,
                              ci->i_truncate_size,
                              page_pos, pages_left, page_align);
    if (ret == -ENOENT)
        ret = 0;
    hit_stripe = this_len < left;
    was_short = ret >= 0 && ret < this_len;
    dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
         ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

    if (ret >= 0) {
        int didpages;
        if (was_short && (pos + ret < inode->i_size)) {
            u64 tmp = min(this_len - ret,
                          inode->i_size - pos - ret);
            dout(" zero gap %llu to %llu\n",
                 pos + ret, pos + ret + tmp);
            ceph_zero_page_vector_range(page_align + read + ret,
                                        tmp, pages);
            ret += tmp;
        }

        didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
        pos += ret;
        read = pos - off;
        left -= ret;
        page_pos += didpages;
        pages_left -= didpages;

        /* hit a stripe boundary and need to continue */
        if (left && hit_stripe && pos < inode->i_size)
            goto more;
    }

    if (read > 0) {
        ret = read;
        /* did we bounce off eof? */
        if (pos + left > inode->i_size)
            *checkeof = CHECK_EOF;
    }

    dout("striped_read returns %d\n", ret);
    return ret;
}
/*
 * Completely synchronous read and write methods. Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                              int *checkeof)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    struct page **pages;
    u64 off = iocb->ki_pos;
    int num_pages, ret;
    size_t len = iov_iter_count(i);

    dout("sync_read on file %p %llu~%u %s\n", file, off,
         (unsigned)len,
         (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

    if (!len)
        return 0;
    /*
     * flush any page cache pages in this range. this
     * will make concurrent normal and sync io slow,
     * but it will at least behave sensibly when they are
     * in sequence.
     */
    ret = filemap_write_and_wait_range(inode->i_mapping, off,
                                       off + len);
    if (ret < 0)
        return ret;

    if (file->f_flags & O_DIRECT) {
        while (iov_iter_count(i)) {
            size_t start;
            ssize_t n;

            n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start);
            if (n < 0)
                return n;

            num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;

            ret = striped_read(inode, off, n,
                               pages, num_pages, checkeof,
                               1, start);

            ceph_put_page_vector(pages, num_pages, true);

            if (ret <= 0)
                break;
            off += ret;
            iov_iter_advance(i, ret);
            if (ret < n)
                break;
        }
    } else {
        num_pages = calc_pages_for(off, len);
        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
            return PTR_ERR(pages);
        ret = striped_read(inode, off, len, pages,
                           num_pages, checkeof, 0, 0);
        if (ret > 0) {
            int l, k = 0;
            size_t left = ret;

            while (left) {
                size_t page_off = off & ~PAGE_MASK;
                size_t copy = min_t(size_t,
                                    PAGE_SIZE - page_off, left);
                l = copy_page_to_iter(pages[k++], page_off,
                                      copy, i);
                off += l;
                left -= l;
                if (l < copy)
                    break;
            }
        }
        ceph_release_page_vector(pages, num_pages);
    }

    if (off > iocb->ki_pos) {
        ret = off - iocb->ki_pos;
        iocb->ki_pos = off;
    }

    dout("sync_read result %d\n", ret);
    return ret;
}
/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd). It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
    struct ceph_inode_info *ci = ceph_inode(req->r_inode);

    dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
         unsafe ? "un" : "");
    if (unsafe) {
        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
        spin_lock(&ci->i_unsafe_lock);
        list_add_tail(&req->r_unsafe_item,
                      &ci->i_unsafe_writes);
        spin_unlock(&ci->i_unsafe_lock);
    } else {
        spin_lock(&ci->i_unsafe_lock);
        list_del_init(&req->r_unsafe_item);
        spin_unlock(&ci->i_unsafe_lock);
        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
    }
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes. (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
    struct ceph_snap_context *snapc;
    struct ceph_vino vino;
    struct ceph_osd_request *req;
    struct page **pages;
    int num_pages;
    int written = 0;
    int flags;
    int check_caps = 0;
    int ret;
    struct timespec mtime = CURRENT_TIME;
    size_t count = iov_iter_count(from);

    if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
        return -EROFS;

    dout("sync_direct_write on file %p %lld~%u\n", file, pos,
         (unsigned)count);

    ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
    if (ret < 0)
        return ret;

    ret = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_CACHE_SHIFT,
                                        (pos + count) >> PAGE_CACHE_SHIFT);
    if (ret < 0)
        dout("invalidate_inode_pages2_range returned %d\n", ret);

    flags = CEPH_OSD_FLAG_ORDERSNAP |
            CEPH_OSD_FLAG_ONDISK |
            CEPH_OSD_FLAG_WRITE;
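    /* ONDISK makes the OSD reply only once the write is committed to
     * stable storage; ORDERSNAP makes it fail with -EOLDSNAPC if our
     * snap context is older than the object's, which is what triggers
     * the retry_snap loop in ceph_write_iter(). */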
    while (iov_iter_count(from) > 0) {
        u64 len = iov_iter_single_seg_count(from);
        size_t start;
        ssize_t n;

        snapc = ci->i_snap_realm->cached_context;
        vino = ceph_vino(inode);
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    vino, pos, &len, 0,
                                    2, /* include a 'startsync' command */
                                    CEPH_OSD_OP_WRITE, flags, snapc,
                                    ci->i_truncate_seq,
                                    ci->i_truncate_size,
                                    false);
        if (IS_ERR(req)) {
            ret = PTR_ERR(req);
            break;
        }

        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);

        n = iov_iter_get_pages_alloc(from, &pages, len, &start);
        if (unlikely(n < 0)) {
            ret = n;
            ceph_osdc_put_request(req);
            break;
        }

        num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
        /*
         * throw out any page cache pages in this range. this
         * may block.
         */
        truncate_inode_pages_range(inode->i_mapping, pos,
                                   (pos+n) | (PAGE_CACHE_SIZE-1));
        osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
                                         false, false);

        /* BUG_ON(vino.snap != CEPH_NOSNAP); */
        ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret)
            ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

        ceph_put_page_vector(pages, num_pages, false);
        ceph_osdc_put_request(req);
        if (ret)
            break;
        pos += n;
        written += n;
        iov_iter_advance(from, n);

        if (pos > i_size_read(inode)) {
            check_caps = ceph_inode_set_size(inode, pos);
            if (check_caps)
                ceph_check_caps(ceph_inode(inode),
                                CHECK_CAPS_AUTHONLY,
                                NULL);
        }
    }

    if (ret != -EOLDSNAPC && written > 0) {
        iocb->ki_pos = pos;
        ret = written;
    }
    return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes. (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
    struct ceph_snap_context *snapc;
    struct ceph_vino vino;
    struct ceph_osd_request *req;
    struct page **pages;
    u64 len;
    int num_pages;
    int written = 0;
    int flags;
    int check_caps = 0;
    int ret;
    struct timespec mtime = CURRENT_TIME;
    size_t count = iov_iter_count(from);

    if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
        return -EROFS;

    dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

    ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
    if (ret < 0)
        return ret;

    ret = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_CACHE_SHIFT,
                                        (pos + count) >> PAGE_CACHE_SHIFT);
    if (ret < 0)
        dout("invalidate_inode_pages2_range returned %d\n", ret);

    flags = CEPH_OSD_FLAG_ORDERSNAP |
            CEPH_OSD_FLAG_ONDISK |
            CEPH_OSD_FLAG_WRITE |
            CEPH_OSD_FLAG_ACK;
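    /* Unlike the direct path above, ACK is also requested here: the
     * OSD sends an early ack when the write is applied in memory and a
     * final commit reply when it is on disk, and ceph_sync_write_unsafe()
     * tracks the request on i_unsafe_writes in between. */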
    while ((len = iov_iter_count(from)) > 0) {
        size_t left;
        int n;

        snapc = ci->i_snap_realm->cached_context;
        vino = ceph_vino(inode);
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    vino, pos, &len, 0, 1,
                                    CEPH_OSD_OP_WRITE, flags, snapc,
                                    ci->i_truncate_seq,
                                    ci->i_truncate_size,
                                    false);
        if (IS_ERR(req)) {
            ret = PTR_ERR(req);
            break;
        }

        /*
         * write from beginning of first page,
         * regardless of io alignment
         */
        num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages)) {
            ret = PTR_ERR(pages);
            goto out;
        }

        left = len;
        for (n = 0; n < num_pages; n++) {
            size_t plen = min_t(size_t, left, PAGE_SIZE);
            ret = copy_page_from_iter(pages[n], 0, plen, from);
            if (ret != plen) {
                ret = -EFAULT;
                break;
            }
            left -= ret;
        }

        if (ret < 0) {
            ceph_release_page_vector(pages, num_pages);
            goto out;
        }

        /* get a second commit callback */
        req->r_unsafe_callback = ceph_sync_write_unsafe;
        req->r_inode = inode;

        osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                         false, true);

        /* BUG_ON(vino.snap != CEPH_NOSNAP); */
        ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret)
            ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
            pos += len;
            written += len;

            if (pos > i_size_read(inode)) {
                check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                    ceph_check_caps(ceph_inode(inode),
                                    CHECK_CAPS_AUTHONLY,
                                    NULL);
            }
        } else
            break;
    }

    if (ret != -EOLDSNAPC && written > 0) {
        ret = written;
        iocb->ki_pos = pos;
    }
    return ret;
}
/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
    struct file *filp = iocb->ki_filp;
    struct ceph_file_info *fi = filp->private_data;
    size_t len = iocb->ki_nbytes;
    struct inode *inode = file_inode(filp);
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct page *pinned_page = NULL;
    ssize_t ret;
    int want, got = 0;
    int retry_op = 0, read = 0;

again:
    dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
         inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

    if (fi->fmode & CEPH_FILE_MODE_LAZY)
        want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
    else
        want = CEPH_CAP_FILE_CACHE;
    ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
    if (ret < 0)
        return ret;

    if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
        (iocb->ki_filp->f_flags & O_DIRECT) ||
        (fi->flags & CEPH_F_SYNC)) {
        dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
             ceph_cap_string(got));

        if (ci->i_inline_version == CEPH_INLINE_NONE) {
            /* hmm, this isn't really async... */
            ret = ceph_sync_read(iocb, to, &retry_op);
        } else {
            retry_op = READ_INLINE;
        }
    } else {
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
             ceph_cap_string(got));

        ret = generic_file_read_iter(iocb, to);
    }
    dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
         inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
    if (pinned_page) {
        page_cache_release(pinned_page);
        pinned_page = NULL;
    }
    ceph_put_cap_refs(ci, got);

    if (retry_op && ret >= 0) {
        int statret;
        struct page *page = NULL;
        loff_t i_size;
        if (retry_op == READ_INLINE) {
            page = __page_cache_alloc(GFP_NOFS);
            if (!page)
                return -ENOMEM;
        }

        statret = __ceph_do_getattr(inode, page,
                                    CEPH_STAT_CAP_INLINE_DATA, !!page);
        if (statret < 0) {
            if (page)	/* only allocated for READ_INLINE */
                __free_page(page);
            if (statret == -ENODATA) {
                BUG_ON(retry_op != READ_INLINE);
                goto again;
            }
            return statret;
        }

        i_size = i_size_read(inode);
        if (retry_op == READ_INLINE) {
            /* does not support inline data > PAGE_SIZE */
            if (i_size > PAGE_CACHE_SIZE) {
                ret = -EIO;
            } else if (iocb->ki_pos < i_size) {
                loff_t end = min_t(loff_t, i_size,
                                   iocb->ki_pos + len);
                if (statret < end)
                    zero_user_segment(page, statret, end);
                ret = copy_page_to_iter(page,
                                        iocb->ki_pos & ~PAGE_MASK,
                                        end - iocb->ki_pos, to);
                iocb->ki_pos += ret;
            } else {
                ret = 0;
            }
            __free_pages(page, 0);
            return ret;
        }

        /* hit EOF or hole? */
        if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
            ret < len) {
            dout("sync_read hit hole, ppos %lld < size %lld"
                 ", reading more\n", iocb->ki_pos,
                 inode->i_size);

            read += ret;
            len -= ret;
            retry_op = 0;
            goto again;
        }
    }
    if (ret >= 0)
        ret += read;

    return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC. In that case, retry the write _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct file *file = iocb->ki_filp;
    struct ceph_file_info *fi = file->private_data;
    struct inode *inode = file_inode(file);
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_osd_client *osdc =
        &ceph_sb_to_client(inode->i_sb)->client->osdc;
    ssize_t count = iov_iter_count(from), written = 0;
    int err, want, got;
    loff_t pos = iocb->ki_pos;

    if (ceph_snap(inode) != CEPH_NOSNAP)
        return -EROFS;

    mutex_lock(&inode->i_mutex);

    /* We can write back this queue in page reclaim */
    current->backing_dev_info = inode_to_bdi(inode);

    err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
    if (err)
        goto out;

    if (count == 0)
        goto out;
    iov_iter_truncate(from, count);

    err = file_remove_suid(file);
    if (err)
        goto out;

    err = file_update_time(file);
    if (err)
        goto out;

    if (ci->i_inline_version != CEPH_INLINE_NONE) {
        err = ceph_uninline_data(file, NULL);
        if (err < 0)
            goto out;
    }

retry_snap:
    if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
        err = -ENOSPC;
        goto out;
    }

    dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
         inode, ceph_vinop(inode), pos, count, inode->i_size);
    if (fi->fmode & CEPH_FILE_MODE_LAZY)
        want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
    else
        want = CEPH_CAP_FILE_BUFFER;
    got = 0;
    err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
                        &got, NULL);
    if (err < 0)
        goto out;

    dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
         inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

    if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
        (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
        struct iov_iter data;
        mutex_unlock(&inode->i_mutex);
        /* we might need to revert back to that point */
        data = *from;
        if (file->f_flags & O_DIRECT)
            written = ceph_sync_direct_write(iocb, &data, pos);
        else
            written = ceph_sync_write(iocb, &data, pos);
        if (written == -EOLDSNAPC) {
            dout("aio_write %p %llx.%llx %llu~%u "
                 "got EOLDSNAPC, retrying\n",
                 inode, ceph_vinop(inode),
                 pos, (unsigned)count);
            mutex_lock(&inode->i_mutex);
            goto retry_snap;
        }
        if (written > 0)
            iov_iter_advance(from, written);
    } else {
        loff_t old_size = inode->i_size;
        /*
         * No need to acquire the i_truncate_mutex: the MDS revokes
         * Fwb caps before sending a truncate message to us, and we
         * can't get the Fwb cap while there is a pending vmtruncate,
         * so write and vmtruncate cannot run at the same time.
         */
        written = generic_perform_write(file, from, pos);
        if (likely(written >= 0))
            iocb->ki_pos = pos + written;
        if (inode->i_size > old_size)
            ceph_fscache_update_objectsize(inode);
        mutex_unlock(&inode->i_mutex);
    }

    if (written >= 0) {
        int dirty;
        spin_lock(&ci->i_ceph_lock);
        ci->i_inline_version = CEPH_INLINE_NONE;
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
        spin_unlock(&ci->i_ceph_lock);
        if (dirty)
            __mark_inode_dirty(inode, dirty);
    }

    dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
         inode, ceph_vinop(inode), pos, (unsigned)count,
         ceph_cap_string(got));
    ceph_put_cap_refs(ci, got);

    if (written >= 0 &&
        ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
         ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
        err = vfs_fsync_range(file, pos, pos + written - 1, 1);
        if (err < 0)
            written = err;
    }

    goto out_unlocked;

out:
    mutex_unlock(&inode->i_mutex);
out_unlocked:
    current->backing_dev_info = NULL;
    return written ? written : err;
}
/*
 * llseek. be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    int ret;

    mutex_lock(&inode->i_mutex);

    if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
        ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
        if (ret < 0) {
            offset = ret;
            goto out;
        }
    }

    switch (whence) {
    case SEEK_END:
        offset += inode->i_size;
        break;
    case SEEK_CUR:
        /*
         * Here we special-case the lseek(fd, 0, SEEK_CUR)
         * position-querying operation. Avoid rewriting the "same"
         * f_pos value back to the file because a concurrent read(),
         * write() or lseek() might have altered it
         */
        if (offset == 0) {
            offset = file->f_pos;
            goto out;
        }
        offset += file->f_pos;
        break;
    case SEEK_DATA:
        if (offset >= inode->i_size) {
            offset = -ENXIO;	/* we return offset below, not ret */
            goto out;
        }
        break;
    case SEEK_HOLE:
        if (offset >= inode->i_size) {
            offset = -ENXIO;	/* we return offset below, not ret */
            goto out;
        }
        offset = inode->i_size;
        break;
    }

    offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
    mutex_unlock(&inode->i_mutex);
    return offset;
}
static inline void ceph_zero_partial_page(
    struct inode *inode, loff_t offset, unsigned size)
{
    struct page *page;
    pgoff_t index = offset >> PAGE_CACHE_SHIFT;

    page = find_lock_page(inode->i_mapping, index);
    if (page) {
        wait_on_page_writeback(page);
        zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
        unlock_page(page);
        page_cache_release(page);
    }
}
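
/*
 * Worked example (assuming PAGE_CACHE_SIZE is 4096): zeroing 10000
 * bytes at offset 1000 first zeroes the 3096-byte head of the first
 * page, then truncates round_down(6904, 4096) = 4096 bytes of whole
 * pages from the cache, then zeroes the remaining 2808-byte tail in
 * the last partial page.
 */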
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
{
    loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
    if (offset < nearly) {
        loff_t size = nearly - offset;
        if (length < size)
            size = length;
        ceph_zero_partial_page(inode, offset, size);
        offset += size;
        length -= size;
    }
    if (length >= PAGE_CACHE_SIZE) {
        loff_t size = round_down(length, PAGE_CACHE_SIZE);
        truncate_pagecache_range(inode, offset, offset + size - 1);
        offset += size;
        length -= size;
    }
    if (length)
        ceph_zero_partial_page(inode, offset, length);
}
static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
{
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
    struct ceph_osd_request *req;
    int ret = 0;
    loff_t zero = 0;
    int op;

    if (!length) {
        op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
        length = &zero;
    } else {
        op = CEPH_OSD_OP_ZERO;
    }

    req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                ceph_vino(inode),
                                offset, length,
                                0, 1, op,
                                CEPH_OSD_FLAG_WRITE |
                                CEPH_OSD_FLAG_ONDISK,
                                NULL, 0, 0, false);
    if (IS_ERR(req)) {
        ret = PTR_ERR(req);
        goto out;
    }

    ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
                            &inode->i_mtime);

    ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
    if (!ret) {
        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
        if (ret == -ENOENT)
            ret = 0;
    }
    ceph_osdc_put_request(req);

out:
    return ret;
}
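
/*
 * A "period" (object set) is object_size * stripe_count bytes, after
 * which the striping pattern repeats. Whole objects inside the hole
 * can be deleted (or truncated, for a leading object) by calling
 * ceph_zero_partial_object() with a NULL length; partial spans at the
 * edges are zeroed with CEPH_OSD_OP_ZERO. With the common defaults
 * (4 MB objects, stripe_count 1 -- an assumption, the layout is
 * per-file) the period is just 4 MB.
 */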
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
    int ret = 0;
    struct ceph_inode_info *ci = ceph_inode(inode);
    s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
    s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
    s32 object_size = ceph_file_layout_object_size(ci->i_layout);
    u64 object_set_size = object_size * stripe_count;
    u64 nearly, t;

    /* round offset up to next period boundary */
    nearly = offset + object_set_size - 1;
    t = nearly;
    nearly -= do_div(t, object_set_size);

    while (length && offset < nearly) {
        loff_t size = length;
        ret = ceph_zero_partial_object(inode, offset, &size);
        if (ret < 0)
            return ret;
        offset += size;
        length -= size;
    }
    while (length >= object_set_size) {
        int i;
        loff_t pos = offset;
        for (i = 0; i < stripe_count; ++i) {
            ret = ceph_zero_partial_object(inode, pos, NULL);
            if (ret < 0)
                return ret;
            pos += stripe_unit;
        }
        offset += object_set_size;
        length -= object_set_size;
    }
    while (length) {
        loff_t size = length;
        ret = ceph_zero_partial_object(inode, offset, &size);
        if (ret < 0)
            return ret;
        offset += size;
        length -= size;
    }
    return ret;
}
static long ceph_fallocate(struct file *file, int mode,
                           loff_t offset, loff_t length)
{
    struct ceph_file_info *fi = file->private_data;
    struct inode *inode = file_inode(file);
    struct ceph_inode_info *ci = ceph_inode(inode);
    struct ceph_osd_client *osdc =
        &ceph_inode_to_client(inode)->client->osdc;
    int want, got = 0;
    int dirty;
    int ret = 0;
    loff_t endoff = 0;
    loff_t size;

    if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
        return -EOPNOTSUPP;

    if (!S_ISREG(inode->i_mode))
        return -EOPNOTSUPP;

    mutex_lock(&inode->i_mutex);

    if (ceph_snap(inode) != CEPH_NOSNAP) {
        ret = -EROFS;
        goto unlock;
    }

    if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
        !(mode & FALLOC_FL_PUNCH_HOLE)) {
        ret = -ENOSPC;
        goto unlock;
    }

    if (ci->i_inline_version != CEPH_INLINE_NONE) {
        ret = ceph_uninline_data(file, NULL);
        if (ret < 0)
            goto unlock;
    }

    size = i_size_read(inode);
    if (!(mode & FALLOC_FL_KEEP_SIZE))
        endoff = offset + length;

    if (fi->fmode & CEPH_FILE_MODE_LAZY)
        want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
    else
        want = CEPH_CAP_FILE_BUFFER;

    ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
    if (ret < 0)
        goto unlock;

    if (mode & FALLOC_FL_PUNCH_HOLE) {
        if (offset < size)
            ceph_zero_pagecache_range(inode, offset, length);
        ret = ceph_zero_objects(inode, offset, length);
    } else if (endoff > size) {
        truncate_pagecache_range(inode, size, -1);
        if (ceph_inode_set_size(inode, endoff))
            ceph_check_caps(ceph_inode(inode),
                            CHECK_CAPS_AUTHONLY, NULL);
    }

    if (!ret) {
        spin_lock(&ci->i_ceph_lock);
        ci->i_inline_version = CEPH_INLINE_NONE;
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
        spin_unlock(&ci->i_ceph_lock);
        if (dirty)
            __mark_inode_dirty(inode, dirty);
    }

    ceph_put_cap_refs(ci, got);
unlock:
    mutex_unlock(&inode->i_mutex);
    return ret;
}
const struct file_operations ceph_file_fops = {
    .open = ceph_open,
    .release = ceph_release,
    .llseek = ceph_llseek,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = ceph_read_iter,
    .write_iter = ceph_write_iter,
    .mmap = ceph_mmap,
    .fsync = ceph_fsync,
    .lock = ceph_lock,
    .flock = ceph_flock,
    .splice_read = generic_file_splice_read,
    .splice_write = iter_file_splice_write,
    .unlocked_ioctl = ceph_ioctl,
    .compat_ioctl = ceph_ioctl,
    .fallocate = ceph_fallocate,
};