ops_file.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/gfs2_ioctl.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "jdata.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "ops_file.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"

/* "bad" is for NFS support */
struct filldir_bad_entry {
	char *fbe_name;
	unsigned int fbe_length;
	uint64_t fbe_offset;
	struct gfs2_inum fbe_inum;
	unsigned int fbe_type;
};

struct filldir_bad {
	struct gfs2_sbd *fdb_sbd;
	struct filldir_bad_entry *fdb_entry;
	unsigned int fdb_entry_num;
	unsigned int fdb_entry_off;
	char *fdb_name;
	unsigned int fdb_name_size;
	unsigned int fdb_name_off;
};

/* For regular, non-NFS */
struct filldir_reg {
	struct gfs2_sbd *fdr_sbd;
	int fdr_prefetch;
	filldir_t fdr_filldir;
	void *fdr_opaque;
};

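/*
 * do_rw_t is the prototype shared by the read/write worker functions
 * (do_read_buf, do_read_direct, do_write_buf, do_write_direct).  walk_vm()
 * hands the worker any glock holders needed for GFS2-backed mappings of the
 * user buffer, plus one spare slot for the worker's own lock on the file.
 */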
typedef ssize_t(*do_rw_t) (struct file *file,
			   char __user *buf,
			   size_t size, loff_t *offset,
			   unsigned int num_gh, struct gfs2_holder *ghs);

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	if (origin == 2) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = remote_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = remote_llseek(file, offset, origin);

	return error;
}

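/*
 * vma2state - pick the glock state needed to fault in a mapped buffer
 *
 * A VMA that could become a shared, writable mapping needs an exclusive
 * lock so that faults during the copy may dirty its pages; anything else
 * only needs a shared lock.
 */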
static inline unsigned int vma2state(struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_MAYWRITE | VM_MAYSHARE)) ==
	    (VM_MAYWRITE | VM_MAYSHARE))
		return LM_ST_EXCLUSIVE;
	return LM_ST_SHARED;
}

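/*
 * walk_vm_hard - acquire a holder for each GFS2 mapping backing the buffer
 *
 * Called from walk_vm() once it has found at least one VMA in the user
 * buffer that is backed by a file on the same GFS2 filesystem.  It counts
 * the matching VMAs, allocates an array of holders (plus one slot for the
 * worker's own lock), initializes a holder per matching VMA, and then calls
 * the read/write worker.  The caller already holds mmap_sem for read unless
 * the process is dumping core.
 */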
static ssize_t walk_vm_hard(struct file *file, const char __user *buf, size_t size,
			    loff_t *offset, do_rw_t operation)
{
	struct gfs2_holder *ghs;
	unsigned int num_gh = 0;
	ssize_t count;
	struct super_block *sb = file->f_dentry->d_inode->i_sb;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + size;
	int dumping = (current->flags & PF_DUMPCORE);
	unsigned int x = 0;

	for (vma = find_vma(mm, start); vma; vma = vma->vm_next) {
		if (end <= vma->vm_start)
			break;
		if (vma->vm_file &&
		    vma->vm_file->f_dentry->d_inode->i_sb == sb) {
			num_gh++;
		}
	}

	ghs = kcalloc((num_gh + 1), sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs) {
		if (!dumping)
			up_read(&mm->mmap_sem);
		return -ENOMEM;
	}

	for (vma = find_vma(mm, start); vma; vma = vma->vm_next) {
		if (end <= vma->vm_start)
			break;
		if (vma->vm_file) {
			struct inode *inode = vma->vm_file->f_dentry->d_inode;
			if (inode->i_sb == sb)
				gfs2_holder_init(get_v2ip(inode)->i_gl,
						 vma2state(vma), 0, &ghs[x++]);
		}
	}

	if (!dumping)
		up_read(&mm->mmap_sem);

	gfs2_assert(get_v2sdp(sb), x == num_gh);

	count = operation(file, buf, size, offset, num_gh, ghs);

	while (num_gh--)
		gfs2_holder_uninit(&ghs[num_gh]);
	kfree(ghs);

	return count;
}

/**
 * walk_vm - Walk the vmas associated with a buffer for read or write.
 *    If any of them are gfs2, pass the gfs2 inode down to the read/write
 *    worker function so that locks can be acquired in the correct order.
 * @file: The file to read/write from/to
 * @buf: The buffer to copy to/from
 * @size: The amount of data requested
 * @offset: The current file offset
 * @operation: The read or write worker function
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t walk_vm(struct file *file, const char __user *buf, size_t size,
		       loff_t *offset, do_rw_t operation)
{
	struct gfs2_holder gh;

	if (current->mm) {
		struct super_block *sb = file->f_dentry->d_inode->i_sb;
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;
		unsigned long start = (unsigned long)buf;
		unsigned long end = start + size;
		int dumping = (current->flags & PF_DUMPCORE);

		if (!dumping)
			down_read(&mm->mmap_sem);

		for (vma = find_vma(mm, start); vma; vma = vma->vm_next) {
			if (end <= vma->vm_start)
				break;
			if (vma->vm_file &&
			    vma->vm_file->f_dentry->d_inode->i_sb == sb)
				goto do_locks;
		}

		if (!dumping)
			up_read(&mm->mmap_sem);
	}

	return operation(file, buf, size, offset, 0, &gh);

 do_locks:
	return walk_vm_hard(file, buf, size, offset, operation);
}

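/*
 * do_jdata_read - read from a journaled-data file
 *
 * Journaled data is copied with gfs2_jdata_read() and gfs2_copy2user()
 * rather than generic_file_read().  Enforces the non-O_LARGEFILE offset
 * limit and advances *offset by the number of bytes copied.
 */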
static ssize_t do_jdata_read(struct file *file, char __user *buf, size_t size,
			     loff_t *offset)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	ssize_t count = 0;

	if (*offset < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EFAULT;

	if (!(file->f_flags & O_LARGEFILE)) {
		if (*offset >= MAX_NON_LFS)
			return -EFBIG;
		if (*offset + size > MAX_NON_LFS)
			size = MAX_NON_LFS - *offset;
	}

	count = gfs2_jdata_read(ip, buf, *offset, size, gfs2_copy2user);

	if (count > 0)
		*offset += count;

	return count;
}

/**
 * do_read_direct - Read bytes from a file
 * @file: The file to read from
 * @buf: The buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 * @num_gh: The number of other locks we need to do the read
 * @ghs: the locks we need plus one for our lock
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t do_read_direct(struct file *file, char __user *buf, size_t size,
			      loff_t *offset, unsigned int num_gh,
			      struct gfs2_holder *ghs)
{
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = get_v2ip(inode);
	unsigned int state = LM_ST_DEFERRED;
	int flags = 0;
	unsigned int x;
	ssize_t count = 0;
	int error;

	for (x = 0; x < num_gh; x++)
		if (ghs[x].gh_gl == ip->i_gl) {
			state = LM_ST_SHARED;
			flags |= GL_LOCAL_EXCL;
			break;
		}

	gfs2_holder_init(ip->i_gl, state, flags, &ghs[num_gh]);

	error = gfs2_glock_nq_m(num_gh + 1, ghs);
	if (error)
		goto out;

	error = -EINVAL;
	if (gfs2_is_jdata(ip))
		goto out_gunlock;

	if (gfs2_is_stuffed(ip)) {
		size_t mask = bdev_hardsect_size(inode->i_sb->s_bdev) - 1;

		if (((*offset) & mask) || (((unsigned long)buf) & mask))
			goto out_gunlock;

		count = do_jdata_read(file, buf, size & ~mask, offset);
	} else
		count = generic_file_read(file, buf, size, offset);

	error = 0;

 out_gunlock:
	gfs2_glock_dq_m(num_gh + 1, ghs);

 out:
	gfs2_holder_uninit(&ghs[num_gh]);

	return (count) ? count : error;
}

/**
 * do_read_buf - Read bytes from a file
 * @file: The file to read from
 * @buf: The buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 * @num_gh: The number of other locks we need to do the read
 * @ghs: the locks we need plus one for our lock
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t do_read_buf(struct file *file, char __user *buf, size_t size,
			   loff_t *offset, unsigned int num_gh,
			   struct gfs2_holder *ghs)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	ssize_t count = 0;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &ghs[num_gh]);

	error = gfs2_glock_nq_m_atime(num_gh + 1, ghs);
	if (error)
		goto out;

	if (gfs2_is_jdata(ip))
		count = do_jdata_read(file, buf, size, offset);
	else
		count = generic_file_read(file, buf, size, offset);

	gfs2_glock_dq_m(num_gh + 1, ghs);

 out:
	gfs2_holder_uninit(&ghs[num_gh]);

	return (count) ? count : error;
}

/**
 * gfs2_read - Read bytes from a file
 * @file: The file to read from
 * @buf: The buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t gfs2_read(struct file *file, char __user *buf, size_t size,
			 loff_t *offset)
{
	atomic_inc(&get_v2sdp(file->f_mapping->host->i_sb)->sd_ops_file);

	if (file->f_flags & O_DIRECT)
		return walk_vm(file, buf, size, offset, do_read_direct);
	else
		return walk_vm(file, buf, size, offset, do_read_buf);
}

/**
 * grope_mapping - feel up a mapping that needs to be written
 * @buf: the start of the memory to be written
 * @size: the size of the memory to be written
 *
 * We do this after acquiring the locks on the mapping,
 * but before starting the write transaction.  We need to make
 * sure that we don't cause recursive transactions if blocks
 * need to be allocated to the file backing the mapping.
 *
 * Returns: errno
 */

static int grope_mapping(const char __user *buf, size_t size)
{
	const char __user *stop = buf + size;
	char c;

	while (buf < stop) {
		if (copy_from_user(&c, buf, 1))
			return -EFAULT;
		buf += PAGE_CACHE_SIZE;
		buf = (const char __user *)PAGE_ALIGN((unsigned long)buf);
	}

	return 0;
}

/**
 * do_write_direct_alloc - Write bytes to a file
 * @file: The file to write to
 * @buf: The buffer to copy from
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t do_write_direct_alloc(struct file *file, const char __user *buf, size_t size,
				     loff_t *offset)
{
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = NULL;
	struct iovec local_iov = { .iov_base = buf, .iov_len = size };
	struct buffer_head *dibh;
	unsigned int data_blocks, ind_blocks;
	ssize_t count;
	int error;

	gfs2_write_calc_reserv(ip, size, &data_blocks, &ind_blocks);

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
	if (error)
		goto fail_gunlock_q;

	al->al_requested = data_blocks + ind_blocks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto fail_gunlock_q;

	error = gfs2_trans_begin(sdp,
				 al->al_rgd->rd_ri.ri_length + ind_blocks +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto fail_ipres;

	if ((ip->i_di.di_mode & (S_ISUID | S_ISGID)) && !capable(CAP_FSETID)) {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto fail_end_trans;

		ip->i_di.di_mode &= (ip->i_di.di_mode & S_IXGRP) ?
			(~(S_ISUID | S_ISGID)) : (~S_ISUID);

		gfs2_trans_add_bh(ip->i_gl, dibh);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_sync, NULL);
		if (error)
			goto fail_end_trans;
	}

	count = generic_file_write_nolock(file, &local_iov, 1, offset);
	if (count < 0) {
		error = count;
		goto fail_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;

	if (ip->i_di.di_size < inode->i_size)
		ip->i_di.di_size = inode->i_size;
	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();

	gfs2_trans_add_bh(ip->i_gl, dibh);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	gfs2_trans_end(sdp);

	if (file->f_flags & O_SYNC)
		gfs2_log_flush_glock(ip->i_gl);

	gfs2_inplace_release(ip);
	gfs2_quota_unlock(ip);
	gfs2_alloc_put(ip);

	if (file->f_mapping->nrpages) {
		error = filemap_fdatawrite(file->f_mapping);
		if (!error)
			error = filemap_fdatawait(file->f_mapping);
	}
	if (error)
		return error;

	return count;

 fail_end_trans:
	gfs2_trans_end(sdp);

 fail_ipres:
	gfs2_inplace_release(ip);

 fail_gunlock_q:
	gfs2_quota_unlock(ip);

 fail:
	gfs2_alloc_put(ip);

	return error;
}

/**
 * do_write_direct - Write bytes to a file
 * @file: The file to write to
 * @buf: The buffer to copy from
 * @size: The amount of data requested
 * @offset: The current file offset
 * @num_gh: The number of other locks we need to do the read
 * @gh: the locks we need plus one for our lock
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t do_write_direct(struct file *file, const char __user *buf, size_t size,
			       loff_t *offset, unsigned int num_gh,
			       struct gfs2_holder *ghs)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_file *fp = get_v2fp(file);
	unsigned int state = LM_ST_DEFERRED;
	int alloc_required;
	unsigned int x;
	size_t s;
	ssize_t count = 0;
	int error;

	if (test_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags))
		state = LM_ST_EXCLUSIVE;
	else
		for (x = 0; x < num_gh; x++)
			if (ghs[x].gh_gl == ip->i_gl) {
				state = LM_ST_EXCLUSIVE;
				break;
			}

 restart:
	gfs2_holder_init(ip->i_gl, state, 0, &ghs[num_gh]);

	error = gfs2_glock_nq_m(num_gh + 1, ghs);
	if (error)
		goto out;

	error = -EINVAL;
	if (gfs2_is_jdata(ip))
		goto out_gunlock;

	if (num_gh) {
		error = grope_mapping(buf, size);
		if (error)
			goto out_gunlock;
	}

	if (file->f_flags & O_APPEND)
		*offset = ip->i_di.di_size;

	if (!(file->f_flags & O_LARGEFILE)) {
		error = -EFBIG;
		if (*offset >= MAX_NON_LFS)
			goto out_gunlock;
		if (*offset + size > MAX_NON_LFS)
			size = MAX_NON_LFS - *offset;
	}

	if (gfs2_is_stuffed(ip) ||
	    *offset + size > ip->i_di.di_size ||
	    ((ip->i_di.di_mode & (S_ISUID | S_ISGID)) && !capable(CAP_FSETID)))
		alloc_required = 1;
	else {
		error = gfs2_write_alloc_required(ip, *offset, size,
						  &alloc_required);
		if (error)
			goto out_gunlock;
	}

	if (alloc_required && state != LM_ST_EXCLUSIVE) {
		gfs2_glock_dq_m(num_gh + 1, ghs);
		gfs2_holder_uninit(&ghs[num_gh]);
		state = LM_ST_EXCLUSIVE;
		goto restart;
	}

	if (alloc_required) {
		set_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);

		/* split large writes into smaller atomic transactions */
		while (size) {
			s = gfs2_tune_get(sdp, gt_max_atomic_write);
			if (s > size)
				s = size;

			error = do_write_direct_alloc(file, buf, s, offset);
			if (error < 0)
				goto out_gunlock;

			buf += error;
			size -= error;
			count += error;
		}
	} else {
		struct iovec local_iov = { .iov_base = buf, .iov_len = size };
		struct gfs2_holder t_gh;

		clear_bit(GFF_DID_DIRECT_ALLOC, &fp->f_flags);

		error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
					   GL_NEVER_RECURSE, &t_gh);
		if (error)
			goto out_gunlock;

		count = generic_file_write_nolock(file, &local_iov, 1, offset);

		gfs2_glock_dq_uninit(&t_gh);
	}

	error = 0;

 out_gunlock:
	gfs2_glock_dq_m(num_gh + 1, ghs);

 out:
	gfs2_holder_uninit(&ghs[num_gh]);

	return (count) ? count : error;
}

/**
 * do_do_write_buf - Write bytes to a file
 * @file: The file to write to
 * @buf: The buffer to copy from
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t do_do_write_buf(struct file *file, const char __user *buf, size_t size,
			       loff_t *offset)
{
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = NULL;
	struct buffer_head *dibh;
	unsigned int data_blocks, ind_blocks;
	int alloc_required, journaled;
	ssize_t count;
	int error;

	journaled = gfs2_is_jdata(ip);

	gfs2_write_calc_reserv(ip, size, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, *offset, size, &alloc_required);
	if (error)
		return error;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto fail;

		error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
		if (error)
			goto fail_gunlock_q;

		al->al_requested = data_blocks + ind_blocks;

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto fail_gunlock_q;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_ri.ri_length +
					 ind_blocks +
					 ((journaled) ? data_blocks : 0) +
					 RES_DINODE + RES_STATFS + RES_QUOTA,
					 0);
		if (error)
			goto fail_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 ((journaled) ? data_blocks : 0) +
					 RES_DINODE,
					 0);
		if (error)
			goto fail_ipres;
	}

	if ((ip->i_di.di_mode & (S_ISUID | S_ISGID)) && !capable(CAP_FSETID)) {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto fail_end_trans;

		ip->i_di.di_mode &= (ip->i_di.di_mode & S_IXGRP) ?
			(~(S_ISUID | S_ISGID)) : (~S_ISUID);

		gfs2_trans_add_bh(ip->i_gl, dibh);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	if (journaled) {
		count = gfs2_jdata_write(ip, buf, *offset, size,
					 gfs2_copy_from_user);
		if (count < 0) {
			error = count;
			goto fail_end_trans;
		}

		*offset += count;
	} else {
		struct iovec local_iov = { .iov_base = buf, .iov_len = size };

		count = generic_file_write_nolock(file, &local_iov, 1, offset);
		if (count < 0) {
			error = count;
			goto fail_end_trans;
		}

		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto fail_end_trans;

		if (ip->i_di.di_size < inode->i_size)
			ip->i_di.di_size = inode->i_size;
		ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();

		gfs2_trans_add_bh(ip->i_gl, dibh);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

	if (file->f_flags & O_SYNC || IS_SYNC(inode)) {
		gfs2_log_flush_glock(ip->i_gl);
		error = filemap_fdatawrite(file->f_mapping);
		if (error == 0)
			error = filemap_fdatawait(file->f_mapping);
		if (error)
			goto fail_ipres;
	}

	if (alloc_required) {
		gfs2_assert_warn(sdp, count != size ||
				 al->al_alloced);
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}

	return count;

 fail_end_trans:
	gfs2_trans_end(sdp);

 fail_ipres:
	if (alloc_required)
		gfs2_inplace_release(ip);

 fail_gunlock_q:
	if (alloc_required)
		gfs2_quota_unlock(ip);

 fail:
	if (alloc_required)
		gfs2_alloc_put(ip);

	return error;
}

/**
 * do_write_buf - Write bytes to a file
 * @file: The file to write to
 * @buf: The buffer to copy from
 * @size: The amount of data requested
 * @offset: The current file offset
 * @num_gh: The number of other locks we need to do the read
 * @gh: the locks we need plus one for our lock
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t do_write_buf(struct file *file, const char __user *buf, size_t size,
			    loff_t *offset, unsigned int num_gh,
			    struct gfs2_holder *ghs)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	size_t s;
	ssize_t count = 0;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[num_gh]);

	error = gfs2_glock_nq_m(num_gh + 1, ghs);
	if (error)
		goto out;

	if (num_gh) {
		error = grope_mapping(buf, size);
		if (error)
			goto out_gunlock;
	}

	if (file->f_flags & O_APPEND)
		*offset = ip->i_di.di_size;

	if (!(file->f_flags & O_LARGEFILE)) {
		error = -EFBIG;
		if (*offset >= MAX_NON_LFS)
			goto out_gunlock;
		if (*offset + size > MAX_NON_LFS)
			size = MAX_NON_LFS - *offset;
	}

	/* split large writes into smaller atomic transactions */
	while (size) {
		s = gfs2_tune_get(sdp, gt_max_atomic_write);
		if (s > size)
			s = size;

		error = do_do_write_buf(file, buf, s, offset);
		if (error < 0)
			goto out_gunlock;

		buf += error;
		size -= error;
		count += error;
	}

	error = 0;

 out_gunlock:
	gfs2_glock_dq_m(num_gh + 1, ghs);

 out:
	gfs2_holder_uninit(&ghs[num_gh]);

	return (count) ? count : error;
}

/**
 * gfs2_write - Write bytes to a file
 * @file: The file to write to
 * @buf: The buffer to copy from
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes written
 *
 * Returns: The number of bytes written, errno on failure
 */

static ssize_t gfs2_write(struct file *file, const char __user *buf,
			  size_t size, loff_t *offset)
{
	struct inode *inode = file->f_mapping->host;
	ssize_t count;

	atomic_inc(&get_v2sdp(inode->i_sb)->sd_ops_file);

	if (*offset < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, size))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);
	if (file->f_flags & O_DIRECT)
		count = walk_vm(file, buf, size, offset,
				do_write_direct);
	else
		count = walk_vm(file, buf, size, offset, do_write_buf);
	mutex_unlock(&inode->i_mutex);

	return count;
}

/**
 * filldir_reg_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_reg_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_reg *fdr = (struct filldir_reg *)opaque;
	struct gfs2_sbd *sdp = fdr->fdr_sbd;
	int error;

	error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
				 inum->no_formal_ino, type);
	if (error)
		return 1;

	if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_reg - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int readdir_reg(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct filldir_reg fdr;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	int error;

	fdr.fdr_sbd = dip->i_sbd;
	fdr.fdr_prefetch = 1;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dip, &offset, &fdr, filldir_reg_func);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * filldir_bad_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * For supporting NFS.
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_bad_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_bad *fdb = (struct filldir_bad *)opaque;
	struct gfs2_sbd *sdp = fdb->fdb_sbd;
	struct filldir_bad_entry *fbe;

	if (fdb->fdb_entry_off == fdb->fdb_entry_num ||
	    fdb->fdb_name_off + length > fdb->fdb_name_size)
		return 1;

	fbe = &fdb->fdb_entry[fdb->fdb_entry_off];
	fbe->fbe_name = fdb->fdb_name + fdb->fdb_name_off;
	memcpy(fbe->fbe_name, name, length);
	fbe->fbe_length = length;
	fbe->fbe_offset = offset;
	fbe->fbe_inum = *inum;
	fbe->fbe_type = type;

	fdb->fdb_entry_off++;
	fdb->fdb_name_off += length;

	if (!(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_bad - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * For supporting NFS.
 *
 * Returns: errno
 */

static int readdir_bad(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct filldir_reg fdr;
	unsigned int entries, size;
	struct filldir_bad *fdb;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	unsigned int x;
	struct filldir_bad_entry *fbe;
	int error;

	entries = gfs2_tune_get(sdp, gt_entries_per_readdir);
	size = sizeof(struct filldir_bad) +
	    entries * (sizeof(struct filldir_bad_entry) + GFS2_FAST_NAME_SIZE);

	fdb = kzalloc(size, GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->fdb_sbd = sdp;
	fdb->fdb_entry = (struct filldir_bad_entry *)(fdb + 1);
	fdb->fdb_entry_num = entries;
	fdb->fdb_name = ((char *)fdb) + sizeof(struct filldir_bad) +
		entries * sizeof(struct filldir_bad_entry);
	fdb->fdb_name_size = entries * GFS2_FAST_NAME_SIZE;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		goto out;
	}

	error = gfs2_dir_read(dip, &offset, fdb, filldir_bad_func);

	gfs2_glock_dq_uninit(&d_gh);

	fdr.fdr_sbd = sdp;
	fdr.fdr_prefetch = 0;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	for (x = 0; x < fdb->fdb_entry_off; x++) {
		fbe = &fdb->fdb_entry[x];

		error = filldir_reg_func(&fdr,
					 fbe->fbe_name, fbe->fbe_length,
					 fbe->fbe_offset,
					 &fbe->fbe_inum, fbe->fbe_type);
		if (error) {
			file->f_pos = fbe->fbe_offset;
			error = 0;
			goto out;
		}
	}

	file->f_pos = offset;

 out:
	kfree(fdb);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	int error;

	atomic_inc(&get_v2sdp(file->f_mapping->host->i_sb)->sd_ops_file);

	if (strcmp(current->comm, "nfsd") != 0)
		error = readdir_reg(file, dirent, filldir);
	else
		error = readdir_bad(file, dirent, filldir);

	return error;
}

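/*
 * gfs2_ioctl_flags - get or set the GFS2 dinode flags on a file
 *
 * For GFS2_IOCTL_SETFLAGS the new flag word is read from userspace and the
 * dinode is updated under an exclusive glock inside a transaction: changing
 * the immutable/append-only flags requires CAP_LINUX_IMMUTABLE, the jdata
 * and directio flags are only valid on regular files (jdata only while the
 * file is empty), and the inherit flags are only valid on directories.
 * For GFS2_IOCTL_GETFLAGS the current flags are copied back to userspace
 * under a shared glock.
 */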
static int gfs2_ioctl_flags(struct gfs2_inode *ip, unsigned int cmd, unsigned long arg)
{
	unsigned int lmode = (cmd == GFS2_IOCTL_SETFLAGS) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	struct buffer_head *dibh;
	struct gfs2_holder i_gh;
	int error;
	__u32 flags = 0, change;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		error = get_user(flags, (__u32 __user *)arg);
		if (error)
			return -EFAULT;
	}

	error = gfs2_glock_nq_init(ip->i_gl, lmode, 0, &i_gh);
	if (error)
		return error;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		change = flags ^ ip->i_di.di_flags;

		error = -EPERM;
		if (change & (GFS2_DIF_IMMUTABLE|GFS2_DIF_APPENDONLY)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto out;
		}

		error = -EINVAL;
		if (flags & (GFS2_DIF_JDATA|GFS2_DIF_DIRECTIO)) {
			if (!S_ISREG(ip->i_di.di_mode))
				goto out;
			/* FIXME: Would be nice not to require the following test */
			if ((flags & GFS2_DIF_JDATA) && ip->i_di.di_size)
				goto out;
		}
		if (flags & (GFS2_DIF_INHERIT_JDATA|GFS2_DIF_INHERIT_DIRECTIO)) {
			if (!S_ISDIR(ip->i_di.di_mode))
				goto out;
		}

		error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
		if (error)
			goto out;

		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out_trans_end;

		ip->i_di.di_flags = flags;

		gfs2_trans_add_bh(ip->i_gl, dibh);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);

 out_trans_end:
		gfs2_trans_end(ip->i_sbd);
	} else {
		flags = ip->i_di.di_flags;
	}

 out:
	gfs2_glock_dq_uninit(&i_gh);

	if (cmd == GFS2_IOCTL_GETFLAGS) {
		if (put_user(flags, (__u32 __user *)arg))
			return -EFAULT;
	}

	return error;
}

/**
 * gfs2_ioctl - do an ioctl on a file
 * @inode: the inode
 * @file: the file pointer
 * @cmd: the ioctl command
 * @arg: the argument
 *
 * Returns: errno
 */

static int gfs2_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct gfs2_inode *ip = get_v2ip(inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	switch (cmd) {
	case GFS2_IOCTL_IDENTIFY: {
		unsigned int x = GFS2_MAGIC;
		if (copy_to_user((unsigned int __user *)arg, &x, sizeof(unsigned int)))
			return -EFAULT;
		return 0;
	}

	case GFS2_IOCTL_SETFLAGS:
	case GFS2_IOCTL_GETFLAGS:
		return gfs2_ioctl_flags(ip, cmd, arg);

	default:
		return -ENOTTY;
	}
}

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (gfs2_is_jdata(ip)) {
		if (vma->vm_flags & VM_MAYSHARE)
			error = -EOPNOTSUPP;
		else
			vma->vm_ops = &gfs2_vm_ops_private;
	} else {
		/* This is VM_MAYWRITE instead of VM_WRITE because a call
		   to mprotect() can turn on VM_WRITE later. */

		if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
		    (VM_MAYSHARE | VM_MAYWRITE))
			vma->vm_ops = &gfs2_vm_ops_sharewrite;
		else
			vma->vm_ops = &gfs2_vm_ops_private;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	init_MUTEX(&fp->f_fl_mutex);

	fp->f_inode = ip;
	fp->f_vfile = file;

	gfs2_assert_warn(ip->i_sbd, !get_v2fp(file));
	set_v2fp(file, fp);

	if (S_ISREG(ip->i_di.di_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    ip->i_di.di_size > MAX_NON_LFS) {
			error = -EFBIG;
			goto fail_gunlock;
		}

		/* Listen to the Direct I/O flag */
		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			file->f_flags |= O_DIRECT;

		/* Don't let the user open O_DIRECT on a jdata file */
		if ((file->f_flags & O_DIRECT) && gfs2_is_jdata(ip)) {
			error = -EINVAL;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

 fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);

 fail:
	set_v2fp(file, NULL);
	kfree(fp);

	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = get_v2sdp(inode->i_sb);
	struct gfs2_file *fp;

	atomic_inc(&sdp->sd_ops_file);

	fp = get_v2fp(file);
	set_v2fp(file, NULL);

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct gfs2_inode *ip = get_v2ip(dentry->d_inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	gfs2_log_flush_glock(ip->i_gl);

	return 0;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct lm_lockname name =
		{ .ln_number = ip->i_num.no_addr,
		  .ln_type = LM_TYPE_PLOCK };

	atomic_inc(&sdp->sd_ops_file);

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks) {
		if (IS_GETLK(cmd)) {
			struct file_lock *tmp;
			lock_kernel();
			tmp = posix_test_lock(file, fl);
			fl->fl_type = F_UNLCK;
			if (tmp)
				memcpy(fl, tmp, sizeof(struct file_lock));
			unlock_kernel();
			return 0;
		} else {
			int error;
			lock_kernel();
			error = posix_lock_file_wait(file, fl);
			unlock_kernel();
			return error;
		}
	}

	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	else
		return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}

/**
 * gfs2_sendfile - Send bytes from a file to a socket (or another file)
 * @in_file: The file to read from
 * @offset: The beginning file offset
 * @count: The amount of data
 * @actor: The function that does the actual sending
 * @target: Passed through to @actor (usually the destination file or socket)
 *
 * Outputs: offset - updated according to number of bytes read
 *
 * Returns: The number of bytes sent, errno on failure
 */

static ssize_t gfs2_sendfile(struct file *in_file, loff_t *offset, size_t count,
			     read_actor_t actor, void *target)
{
	struct gfs2_inode *ip = get_v2ip(in_file->f_mapping->host);
	struct gfs2_holder gh;
	ssize_t retval;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);

	retval = gfs2_glock_nq_atime(&gh);
	if (retval)
		goto out;

	if (gfs2_is_jdata(ip))
		retval = -EOPNOTSUPP;
	else
		retval = generic_file_sendfile(in_file, offset, count, actor,
					       target);

	gfs2_glock_dq(&gh);

 out:
	gfs2_holder_uninit(&gh);

	return retval;
}

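/*
 * do_flock - acquire a cluster-wide flock on behalf of flock(2)
 *
 * An flock is backed by a dedicated flock glock on the inode, held shared
 * for F_RDLCK and exclusive for F_WRLCK.  If the file already holds the
 * glock in a different mode, the old local flock state and glock are
 * dropped before re-acquiring in the new mode.  Non-blocking requests use
 * LM_FLAG_TRY and map GLR_TRYFAILED to -EAGAIN.
 */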
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = fp->f_inode;
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	down(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		gfs2_glock_hold(gl);
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_uninit(fl_gh);
	} else {
		error = gfs2_glock_get(ip->i_sbd,
				       ip->i_num.no_addr, &gfs2_flock_glops,
				       CREATE, &gl);
		if (error)
			goto out;
	}

	gfs2_holder_init(gl, state, flags, fl_gh);
	gfs2_glock_put(gl);

	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(ip->i_sbd, !error);
	}

 out:
	up(&fp->f_fl_mutex);

	return error;
}

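/*
 * do_unflock - release an flock: drop the local flock state, then drop the
 * flock glock if one is held.
 */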
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	down(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl)
		gfs2_glock_dq_uninit(fl_gh);
	up(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks)
		return flock_lock_file_wait(file, fl);

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else
		return do_flock(file, cmd, fl);
}

struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = gfs2_read,
	.write = gfs2_write,
	.ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.sendfile = gfs2_sendfile,
	.flock = gfs2_flock,
};

struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
};