#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
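/*
 * The accounting above implies the invariant
 *
 *   i_wrbuffer_ref == i_wrbuffer_ref_head +
 *                     (sum over i_cap_snaps of capsnap->dirty_pages)
 *
 * For example, one page dirtied before a snapshot and two dirtied
 * after it give capsnap->dirty_pages == 1, i_wrbuffer_ref_head == 2
 * and i_wrbuffer_ref == 3; writeback drains the capsnap first, then
 * the head context.
 */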
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
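/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12, and a
 * hypothetical congestion_kb of 8192): the "on" threshold is
 * 8192 >> 2 == 2048 dirty pages in flight, and the "off" threshold is
 * 2048 - (2048 >> 2) == 1536 pages.  The 25% gap between the two is
 * hysteresis, so the bdi congestion flag doesn't flap on and off as
 * the writeback count hovers near the threshold.
 */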
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_CACHE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	if (!PagePrivate(page))
		return;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}
/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_CACHE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		goto out;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		goto out;
	}
	if (err < PAGE_CACHE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}
/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result;
	int bytes = le32_to_cpu(msg->hdr.data_len);
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
					(u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT)
			goto unlock;
		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		page_cache_release(page);
		bytes -= PAGE_CACHE_SIZE;
	}
	kfree(osd_data->pages);
}
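/*
 * Example of the zero-fill accounting above (assuming 4 KB pages): a
 * 3-page readahead that returns bytes == 5000 leaves page 0 untouched
 * (bytes == 5000 >= 4096), zeroes page 1 from offset 904 (bytes has
 * dropped to 5000 - 4096 == 904), and zeroes page 2 entirely (bytes
 * is negative by then, so s == 0).  All three pages are still marked
 * uptodate: a short OSD read means the object really ends there, so
 * zeroes are the correct contents.
 */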
static void ceph_unlock_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		unlock_page(pages[i]);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	ceph_unlock_page_vector(pages, nr_pages);
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}
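/*
 * Page ownership in start_read(): pages added to the page cache above
 * are locked; once ceph_osdc_start_request() succeeds they belong to
 * the in-flight request, and finish_read() unlocks and releases them.
 * On any earlier failure we unwind here with ceph_unlock_page_vector()
 * and ceph_release_page_vector() instead.
 */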
/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode,
	     file, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    loff_t *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	loff_t snap_size = -1;
	long writeback_stat;
	u64 truncate_size;
	u32 truncate_seq;
	int err = 0, len = PAGE_CACHE_SIZE;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	if (snap_size == -1)
		snap_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	/* is this a partial page at end of file? */
	if (page_off >= snap_size) {
		dout("%p page eof %llu\n", page, snap_size);
		goto out;
	}
	if (snap_size < page_off + len)
		len = snap_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	ceph_readpage_to_fscache(inode, page);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   truncate_seq, truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */
out:
	return err;
}
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}
/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	unsigned wrote;
	struct page *page;
	int num_pages;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	int rc = req->r_result;
	u64 bytes = req->r_ops[0].extent.length;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
					(u64)osd_data->length);
	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent.  The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < num_pages; i++) {
		page = osd_data->pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context(page_snap_context(page));
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);

	ceph_release_pages(osd_data->pages, num_pages);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync = 0;
	loff_t snap_size, i_size;
	u64 truncate_size;
	u32 truncate_seq;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	if ((wbc->sync_mode == WB_SYNC_ALL) ||
	    ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn("writepage_start %p on forced umount\n", inode);
		truncate_pagecache(inode, 0);
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snap_size = -1;
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	i_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page **pages = NULL;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		int want;
		u64 offset, len;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if (page_offset(page) >=
			    (snap_size == -1 ? i_size : snap_size)) {
				dout("%p page eof %llu\n", page,
				     (snap_size == -1 ? i_size : snap_size));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * allocate an osd request and a page array
			 * that it will use.
			 */
			if (locked_pages == 0) {
				BUG_ON(pages);
				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;
				req = ceph_osdc_new_request(&fsc->client->osdc,
							&ci->i_layout, vino,
							offset, &len, 0,
							do_sync ? 2 : 1,
							CEPH_OSD_OP_WRITE,
							CEPH_OSD_FLAG_WRITE |
							CEPH_OSD_FLAG_ONDISK,
							snapc, truncate_seq,
							truncate_size, true);
				if (IS_ERR(req)) {
					rc = PTR_ERR(req);
					unlock_page(page);
					break;
				}

				if (do_sync)
					osd_req_op_init(req, 1,
						CEPH_OSD_OP_STARTSYNC, 0);

				req->r_callback = writepages_finish;
				req->r_inode = inode;

				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc(max_pages * sizeof (*pages),
						GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
			       atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* Format the osd request message and submit the write */
		offset = page_offset(pages[0]);
		len = (u64)locked_pages << PAGE_CACHE_SHIFT;
		if (snap_size == -1) {
			len = min(len, (u64)i_size_read(inode) - offset);
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			len = max(len, 1 +
				((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT));
		} else {
			len = min(len, snap_size - offset);
		}
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 !!pool, false);

		pages = NULL;	/* request message now owns the pages array */
		pool = NULL;

		/* Update the write op length in case we changed it */
		osd_req_op_extent_update(req, 0, len);

		vino = ceph_vino(inode);
		ceph_osdc_build_request(req, offset, snapc, vino.snap,
					&inode->i_mtime);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;
fail_nosnap:
	unlock_page(page);
	return r;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			page_cache_release(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, from+len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t pos)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
/*
 * vm ops
 */
static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
	int want, got, ret;

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	while (1) {
		got = 0;
		ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
				    -1, &got, &pinned_page);
		if (ret == 0)
			break;
		if (ret != -ERESTARTSYS) {
			WARN_ON(1);
			return VM_FAULT_SIGBUS;
		}
	}
	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE)
		ret = filemap_fault(vma, vmf);
	else
		ret = -EAGAIN;

	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
	if (pinned_page)
		page_cache_release(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (ret != -EAGAIN)
		return ret;

	/* read inline data */
	if (off >= PAGE_CACHE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		int ret1;
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out;
		}
		ret1 = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (ret1 < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			page_cache_release(page);
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
		if (ret1 < PAGE_CACHE_SIZE)
			zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
	}
out:
	dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
	     inode, off, (size_t)PAGE_CACHE_SIZE, ret);
	return ret;
}
/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_SIGBUS;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		ret = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (ret < 0) {
			ret = VM_FAULT_SIGBUS;
			goto out_free;
		}
	}

	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	while (1) {
		got = 0;
		ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
				    &got, NULL);
		if (ret == 0)
			break;
		if (ret != -ERESTARTSYS) {
			WARN_ON(1);
			ret = VM_FAULT_SIGBUS;
			goto out_free;
		}
	}
	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success.  we'll keep the page locked. */
		set_page_dirty(page);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			page_cache_release(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_CACHE_SIZE)
			zero_user_segment(page, len, PAGE_CACHE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
}
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				page_cache_release(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_CACHE_SIZE)
			len = PAGE_CACHE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    ceph_empty_snapc, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    ceph_empty_snapc,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);
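	/*
	 * Three-op request layout: op 0 below is a CMPXATTR guard, op 1
	 * (above) carries the data, and op 2 records the new
	 * "inline_version" xattr.  The guard is intended to let the
	 * request through only if our inline_version is newer than the
	 * one already recorded on the object, so a racing uninline from
	 * another client fails with -ECANCELED (treated as success at
	 * out_put below) instead of clobbering newer data.
	 */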
	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			page_cache_release(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}
static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}
enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};
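/*
 * Pool permissions are probed with a pair of trivial requests against
 * a per-inode probe object: a STAT read and an exclusive CREATE write.
 * -ENOENT on the read and -EEXIST on the write still count as
 * permission granted, since the OSD accepted the op and only the
 * object's (non)existence got in the way.  Results are cached in
 * mdsc->pool_perm_tree so each pool is probed at most once.
 */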
static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			have = perm->perm;
			break;
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			have = perm->perm;
			break;
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
					 ceph_empty_snapc,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
		 "%llx.00000000", ci->i_vino.ino);
	rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
					 ceph_empty_snapc,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
			  CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	wr_req->r_base_oloc.pool = pool;
	wr_req->r_base_oid = rd_req->r_base_oid;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
				&ci->vfs_inode.i_mtime);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
				&ci->vfs_inode.i_mtime);
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM)
		goto out_unlock;

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		err = err2;
		goto out_unlock;
	}

	perm = kmalloc(sizeof(*perm), GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	if (rd_req)
		ceph_osdc_put_request(rd_req);
	if (wr_req)
		ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
	return err;
}
int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
	u32 pool;
	int ret, flags;

	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ceph_file_layout_pg_pool(ci->i_layout);
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %u no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %u no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	ret = __ceph_pool_perm_get(ci, pool);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
		ci->i_ceph_flags = flags;
	} else {
		pool = ceph_file_layout_pg_pool(ci->i_layout);
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}