pnfs.c
/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      u32 id)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;

        if (id == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
                        __func__, id, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }
        ld_type = find_pnfs_driver(id);
        if (!ld_type) {
                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        dprintk("%s: No pNFS module found for %u.\n",
                                __func__, id);
                        goto out_no_driver;
                }
        }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_rpccred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
                atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}
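
/*
 * Example of the retry window (the values follow from the #define above,
 * and are illustrative): with PNFS_LAYOUTGET_RETRY_TIMEOUT = 120*HZ, a
 * fail bit set at jiffies time T keeps pnfs_layout_io_test_failed()
 * returning true for roughly two minutes. Once plh_retry_timestamp falls
 * outside the [now - timeout, now] window, the bit is cleared and
 * LAYOUTGET may be retried.
 */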

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        atomic_set(&lseg->pls_refcount, 1);
        smp_mb();
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *ino = lseg->pls_layout->plh_inode;

        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        struct inode *inode = lo->plh_inode;

        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
        if (list_empty(&lo->plh_segs))
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        lo = lseg->pls_layout;
        inode = lo->plh_inode;
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg(lseg);
                pnfs_put_layout_hdr(lo);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static u64
end_offset(u64 start, u64 len)
{
        u64 end;

        end = start + len;
        return end >= start ? end : NFS4_MAX_UINT64;
}
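
/*
 * Example (illustrative values only): end_offset(0, 4096) returns 4096,
 * the offset of the first byte past the range. With len = NFS4_MAX_UINT64
 * the addition wraps, so end < start and the result is clamped to
 * NFS4_MAX_UINT64, i.e. "to end of file".
 */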

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
                 const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
                    const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
               (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        if (!atomic_dec_and_test(&lseg->pls_refcount))
                return false;
        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
        list_add(&lseg->pls_list, tmp_list);
        return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list. It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
        }
        return rv;
}

/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            struct pnfs_layout_range *recall_range)
{
        struct pnfs_layout_segment *lseg, *next;
        int invalid = 0, removed = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs))
                return 0;
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (!recall_range ||
                    should_free_lseg(&lseg->pls_range, recall_range)) {
                        dprintk("%s: freeing lseg %p iomode %d "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
                                lseg->pls_range.length);
                        invalid++;
                        removed += mark_lseg_invalid(lseg, tmp_list);
                }
        dprintk("%s:Return %i\n", __func__, invalid - removed);
        return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        if (list_empty(free_me))
                return;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                pnfs_free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
                pnfs_get_layout_hdr(lo);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo;
        bool ret = false;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
                pnfs_get_layout_hdr(lo);
                list_add(&lo->plh_bulk_destroy, layout_list);
                ret = true;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
                struct nfs_server *server,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo, *next;
        struct inode *inode;

        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
                inode = igrab(lo->plh_inode);
                if (inode == NULL)
                        continue;
                list_del_init(&lo->plh_layouts);
                if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
                        continue;
                rcu_read_unlock();
                spin_unlock(&clp->cl_lock);
                iput(inode);
                spin_lock(&clp->cl_lock);
                rcu_read_lock();
                return -EAGAIN;
        }
        return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                bool is_bulk_recall)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(lseg_list);
        int ret = 0;

        while (!list_empty(layout_list)) {
                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
                                plh_bulk_destroy);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;
                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                if (is_bulk_recall)
                        set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
                        ret = -EAGAIN;
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                pnfs_put_layout_hdr(lo);
                iput(inode);
        }
        return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
                        continue;
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                server,
                                &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                        server,
                                        &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}
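
/*
 * Worked example (illustrative values only): with s1 = 2 and
 * s2 = 0xfffffffe, s1 - s2 == 4, which is positive as an s32, so s1 is
 * treated as newer even though it is numerically smaller; the seqid
 * space has simply wrapped. Conversely, pnfs_seqid_is_newer(s2, s1)
 * computes (s32)-4 > 0, which is false.
 */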

static void
pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
                const nfs4_stateid *new,
                struct list_head *free_me_list)
{
        if (nfs4_stateid_match_other(&lo->plh_stateid, new))
                return;
        /* Layout is new! Kill existing layout segments */
        pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq, new_barrier;
        int empty = list_empty(&lo->plh_segs);

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
        if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                if (update_barrier) {
                        new_barrier = be32_to_cpu(new->seqid);
                } else {
                        /* Because of wraparound, we want to keep the barrier
                         * "close" to the current seqids.
                         */
                        new_barrier = newseq - atomic_read(&lo->plh_outstanding);
                }
                if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                        lo->plh_barrier = new_barrier;
        }
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid)
{
        u32 seqid = be32_to_cpu(stateid->seqid);

        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
                (list_empty(&lo->plh_segs) &&
                 (atomic_read(&lo->plh_outstanding) > lget));
}
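
/*
 * Reading the lget check above: a caller inside the send_layoutget chain
 * passes lget == 1, so its own in-flight LAYOUTGET (already counted in
 * plh_outstanding) does not block it; a caller outside that chain passes
 * 0 and is blocked by any outstanding request while the segment list is
 * still empty.
 */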

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                              struct nfs4_state *open_state)
{
        int status = 0;

        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (pnfs_layoutgets_blocked(lo, 1)) {
                status = -EAGAIN;
        } else if (!nfs4_valid_open_stateid(open_state)) {
                status = -EBADF;
        } else if (list_empty(&lo->plh_segs)) {
                int seq;

                do {
                        seq = read_seqbegin(&open_state->seqlock);
                        nfs4_stateid_copy(dst, &open_state->stateid);
                } while (read_seqretry(&open_state->seqlock, seq));
        } else
                nfs4_stateid_copy(dst, &lo->plh_stateid);
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
        return status;
}

/*
 * Get layout from server.
 *    For now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
           struct pnfs_layout_range *range,
           gfp_t gfp_flags)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg;

        dprintk("--> %s\n", __func__);

        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        lgp->args.range = *range;
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->gfp_flags = gfp_flags;
        lgp->cred = lo->plh_lc_cred;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        lseg = nfs4_proc_layoutget(lgp, gfp_flags);
        if (IS_ERR(lseg)) {
                switch (PTR_ERR(lseg)) {
                case -ENOMEM:
                case -ERESTARTSYS:
                        break;
                default:
                        /* remember that LAYOUTGET failed and suspend trying */
                        pnfs_layout_io_set_failed(lo, range->iomode);
                }
                return NULL;
        }

        return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
                struct list_head *head)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct pnfs_layout_segment *lseg, *tmp;

        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return;
        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        continue;
                pnfs_lseg_dec_and_remove_zero(lseg, head);
        }
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        struct nfs4_layoutreturn *lrp;
        nfs4_stateid stateid;
        int status = 0, empty;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out;
        }
        lo->plh_block_lgets++;
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);

        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                spin_lock(&ino->i_lock);
                lo->plh_block_lgets--;
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        lrp->args.stateid = stateid;
        lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
        lrp->args.inode = ino;
        lrp->args.layout = lo;
        lrp->clp = NFS_SERVER(ino)->nfs_client;
        lrp->cred = lo->plh_lc_cred;

        status = nfs4_proc_layoutreturn(lrp);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        int ret;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo == NULL) {
                spin_unlock(&inode->i_lock);
                return 0;
        }
        pnfs_get_layout_hdr(lo);
        /* Block new layoutgets and read/write to ds */
        lo->plh_block_lgets++;
        spin_unlock(&inode->i_lock);
        filemap_fdatawait(inode->i_mapping);
        ret = pnfs_layoutcommit_inode(inode, true);
        if (ret == 0)
                ret = _pnfs_return_layout(inode);
        spin_lock(&inode->i_lock);
        lo->plh_block_lgets--;
        spin_unlock(&inode->i_lock);
        pnfs_put_layout_hdr(lo);
        return ret;
}

bool pnfs_roc(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *tmp;
        LIST_HEAD(tmp_list);
        bool found = false;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_nolayout;
        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        mark_lseg_invalid(lseg, &tmp_list);
                        found = true;
                }
        if (!found)
                goto out_nolayout;
        lo->plh_block_lgets++;
        pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
        return true;

out_nolayout:
        spin_unlock(&ino->i_lock);
        return false;
}

void pnfs_roc_release(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        lo->plh_block_lgets--;
        if (atomic_dec_and_test(&lo->plh_refcount)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&ino->i_lock);
                pnfs_free_layout_hdr(lo);
        } else
                spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
                lo->plh_barrier = barrier;
        spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg;
        u32 current_seqid;
        bool found = false;

        spin_lock(&ino->i_lock);
        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                        found = true;
                        goto out;
                }
        lo = nfsi->layout;
        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

        /* Since close does not return a layout stateid for use as
         * a barrier, we choose the worst-case barrier.
         */
        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
        spin_unlock(&ino->i_lock);
        return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
           const struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
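
/*
 * Example ordering (illustrative ranges only): given segments
 * (offset 0, length 100, RW), (offset 0, length 100, READ) and
 * (offset 4096, length 50, RW), the comparison above sorts them as
 * listed: lower offsets first, then longer lengths, then RW before
 * READ, so a whole-file RW segment is encountered ahead of narrower
 * or read-only ones during lookup.
 */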

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_segment *lp;

        dprintk("%s:Begin\n", __func__);

        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
                if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                goto out;
        }
        list_add_tail(&lseg->pls_list, &lo->plh_segs);
        dprintk("%s: inserted lseg %p "
                "iomode %d offset %llu length %llu at tail\n",
                __func__, lseg, lseg->pls_range.iomode,
                lseg->pls_range.offset, lseg->pls_range.length);
out:
        pnfs_get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
                      struct nfs_open_context *ctx,
                      gfp_t gfp_flags)
{
        struct pnfs_layout_hdr *lo;

        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->cred);
        return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;

        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

        if (nfsi->layout != NULL)
                goto out_existing;
        spin_unlock(&ino->i_lock);
        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
        spin_lock(&ino->i_lock);

        if (likely(nfsi->layout == NULL)) {     /* Won the race? */
                nfsi->layout = new;
                return new;
        } else if (new != NULL)
                pnfs_free_layout_hdr(new);
out_existing:
        pnfs_get_layout_hdr(nfsi->layout);
        return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode       lseg    match
 * -----        -----   -----
 * ANY          READ    true
 * ANY          RW      true
 * RW           READ    false
 * RW           RW      true
 * READ         READ    true
 * READ         RW      true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
                 const struct pnfs_layout_range *range)
{
        struct pnfs_layout_range range1;

        if ((range->iomode == IOMODE_RW &&
             ls_range->iomode != IOMODE_RW) ||
            !pnfs_lseg_range_intersecting(ls_range, range))
                return 0;

        /* range1 covers only the first byte in the range */
        range1 = *range;
        range1.length = 1;
        return pnfs_lseg_range_contained(ls_range, &range1);
}
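
/*
 * Example (illustrative values only): a cached READ segment covering
 * [0, 1MB) matches a lookup for (READ, offset 4096, length 8192),
 * because the segment's iomode is acceptable and it contains the first
 * byte of the request; a lookup for (RW, ...) would not match that
 * READ segment regardless of offsets, per the table above.
 */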

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_range *range)
{
        struct pnfs_layout_segment *lseg, *ret = NULL;

        dprintk("%s:Begin\n", __func__);

        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
                    pnfs_lseg_range_match(&lseg->pls_range, range)) {
                        ret = pnfs_get_lseg(lseg);
                        break;
                }
                if (lseg->pls_range.offset > range->offset)
                        break;
        }

        dprintk("%s:Return lseg %p ref %d\n",
                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
        return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
                                     struct inode *ino, int iomode)
{
        struct nfs4_threshold *t = ctx->mdsthreshold;
        struct nfs_inode *nfsi = NFS_I(ino);
        loff_t fsize = i_size_read(ino);
        bool size = false, size_set = false, io = false, io_set = false, ret = false;

        if (t == NULL)
                return ret;

        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

        switch (iomode) {
        case IOMODE_READ:
                if (t->bm & THRESHOLD_RD) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->rd_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_RD_IO) {
                        dprintk("%s nfsi->read_io %llu\n", __func__,
                                nfsi->read_io);
                        io_set = true;
                        if (nfsi->read_io < t->rd_io_sz)
                                io = true;
                }
                break;
        case IOMODE_RW:
                if (t->bm & THRESHOLD_WR) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->wr_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_WR_IO) {
                        dprintk("%s nfsi->write_io %llu\n", __func__,
                                nfsi->write_io);
                        io_set = true;
                        if (nfsi->write_io < t->wr_io_sz)
                                io = true;
                }
                break;
        }
        if (size_set && io_set) {
                if (size && io)
                        ret = true;
        } else if (size || io)
                ret = true;

        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
        return ret;
}
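
/*
 * Worked example (illustrative thresholds only): suppose the OPEN reply
 * set rd_sz = 64KB and rd_io_sz = 16KB. For a 10KB file whose cumulative
 * read_io counter is still under 16KB, both tests are below threshold,
 * so the function returns true and reads go to the MDS. With both
 * thresholds set, both must be below for MDS I/O to win, so once the
 * file grows past 64KB or enough has been read, pNFS is used instead.
 */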

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
                   loff_t pos,
                   u64 count,
                   enum pnfs_iomode iomode,
                   gfp_t gfp_flags)
{
        struct pnfs_layout_range arg = {
                .iomode = iomode,
                .offset = pos,
                .length = count,
        };
        unsigned pg_offset;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs_client *clp = server->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;
        bool first;

        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                goto out;

        if (pnfs_within_mdsthreshold(ctx, ino, iomode))
                goto out;

        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
                spin_unlock(&ino->i_lock);
                goto out;
        }

        /* Do we even need to bother with this? */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s matches recall, use MDS\n", __func__);
                goto out_unlock;
        }

        /* if LAYOUTGET already failed once we don't try again */
        if (pnfs_layout_io_test_failed(lo, iomode))
                goto out_unlock;

        /* Check to see if the layout for the given range already exists */
        lseg = pnfs_find_lseg(lo, &arg);
        if (lseg)
                goto out_unlock;

        if (pnfs_layoutgets_blocked(lo, 0))
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);

        first = list_empty(&lo->plh_layouts) ? true : false;
        spin_unlock(&ino->i_lock);

        if (first) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
                 */
                spin_lock(&clp->cl_lock);
                list_add_tail(&lo->plh_layouts, &server->layouts);
                spin_unlock(&clp->cl_lock);
        }

        pg_offset = arg.offset & ~PAGE_CACHE_MASK;
        if (pg_offset) {
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
        if (arg.length != NFS4_MAX_UINT64)
                arg.length = PAGE_CACHE_ALIGN(arg.length);

        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
        pnfs_put_layout_hdr(lo);
out:
        dprintk("%s: inode %s/%llu pNFS layout segment %s for "
                        "(%s, offset: %llu, length: %llu)\n",
                        __func__, ino->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(ino),
                        lseg == NULL ? "not found" : "found",
                        iomode == IOMODE_RW ? "read/write" : "read-only",
                        (unsigned long long)pos,
                        (unsigned long long)count);
        return lseg;
out_unlock:
        spin_unlock(&ino->i_lock);
        goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
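
/*
 * Example of the page alignment above (illustrative numbers, 4KB pages):
 * for pos = 5000 and count = 100, pg_offset = 5000 & 4095 = 904, so
 * arg.offset becomes 4096 and arg.length becomes 1004, which
 * PAGE_CACHE_ALIGN rounds up to 4096: the LAYOUTGET asks for the whole
 * page [4096, 8192) containing the request.
 */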

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
        struct nfs4_layoutget_res *res = &lgp->res;
        struct pnfs_layout_segment *lseg;
        struct inode *ino = lo->plh_inode;
        LIST_HEAD(free_me);
        int status = 0;

        /* Inject layout blob into I/O device driver */
        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
                else
                        status = PTR_ERR(lseg);
                dprintk("%s: Could not allocate layout: error %d\n",
                       __func__, status);
                goto out;
        }

        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }

        if (pnfs_layoutgets_blocked(lo, 1) ||
            pnfs_layout_stateid_blocked(lo, &res->stateid)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }

        /* Check that the new stateid matches the old stateid */
        pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
        /* Done processing layoutget. Set the layout stateid */
        pnfs_set_layout_stateid(lo, &res->stateid, false);

        init_lseg(lo, lseg);
        lseg->pls_range = res->range;
        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg);

        if (res->return_on_close) {
                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
        }

        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&free_me);
        return lseg;
out:
        return ERR_PTR(status);

out_forget_reply:
        spin_unlock(&ino->i_lock);
        lseg->pls_layout = lo;
        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        u64 rd_size = req->wb_bytes;

        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        if (pgio->pg_dreq == NULL)
                rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
        else
                rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           rd_size,
                                           IOMODE_READ,
                                           GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
                           struct nfs_page *req, u64 wb_size)
{
        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           wb_size,
                                           IOMODE_RW,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                     struct nfs_page *req)
{
        unsigned int size;
        u64 seg_end, req_start, seg_left;

        size = nfs_generic_pg_test(pgio, prev, req);
        if (!size)
                return 0;

        /*
         * 'size' contains the number of bytes left in the current page (up
         * to the original size asked for in @req->wb_bytes).
         *
         * Calculate how many bytes are left in the layout segment
         * and if there are less bytes than 'size', return that instead.
         *
         * Please also note that 'end_offset' is actually the offset of the
         * first byte that lies outside the pnfs_layout_range. FIXME?
         *
         */
        if (pgio->pg_lseg) {
                seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
                                     pgio->pg_lseg->pls_range.length);
                req_start = req_offset(req);
                WARN_ON_ONCE(req_start > seg_end);
                /* start of request is past the last byte of this segment */
                if (req_start >= seg_end)
                        return 0;

                /* adjust 'size' iff there are fewer bytes left in the
                 * segment than what nfs_generic_pg_test returned */
                seg_left = seg_end - req_start;
                if (seg_left < size)
                        size = (unsigned int)seg_left;
        }

        return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
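
/*
 * Worked example (illustrative numbers only): with a layout segment
 * covering (offset 0, length 8192), seg_end is 8192. For a request
 * starting at offset 6144 where nfs_generic_pg_test returned 4096,
 * seg_left is 8192 - 6144 = 2048, which is smaller, so only 2048 bytes
 * may be coalesced; a request starting at 8192 or beyond returns 0.
 */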

int pnfs_write_done_resend_to_mds(struct inode *inode,
                                struct list_head *head,
                                const struct nfs_pgio_completion_ops *compl_ops,
                                struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);

        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, true, compl_ops);
        pgio.pg_dreq = dreq;
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);

                nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(&pgio, req))
                        nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);

        if (!list_empty(&failed)) {
                /* For some reason our attempt to resend pages failed.
                 * Mark the overall send request as having failed, and let
                 * nfs_writeback_release_full deal with the error.
                 */
                list_move(&failed, head);
                return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
  1331. static void pnfs_ld_handle_write_error(struct nfs_pgio_data *data)
  1332. {
  1333. struct nfs_pgio_header *hdr = data->header;
  1334. dprintk("pnfs write error = %d\n", hdr->pnfs_error);
  1335. if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
  1336. PNFS_LAYOUTRET_ON_ERROR) {
  1337. pnfs_return_layout(hdr->inode);
  1338. }
  1339. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
  1340. data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
  1341. &hdr->pages,
  1342. hdr->completion_ops,
  1343. hdr->dreq);
  1344. }
  1345. /*
  1346. * Called by non rpc-based layout drivers
  1347. */
  1348. void pnfs_ld_write_done(struct nfs_pgio_data *data)
  1349. {
  1350. struct nfs_pgio_header *hdr = data->header;
  1351. trace_nfs4_pnfs_write(data, hdr->pnfs_error);
  1352. if (!hdr->pnfs_error) {
  1353. pnfs_set_layoutcommit(data);
  1354. hdr->mds_ops->rpc_call_done(&data->task, data);
  1355. } else
  1356. pnfs_ld_handle_write_error(data);
  1357. hdr->mds_ops->rpc_release(data);
  1358. }
  1359. EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
  1360. static void
  1361. pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
  1362. struct nfs_pgio_data *data)
  1363. {
  1364. struct nfs_pgio_header *hdr = data->header;
  1365. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
  1366. list_splice_tail_init(&hdr->pages, &desc->pg_list);
  1367. nfs_pageio_reset_write_mds(desc);
  1368. desc->pg_recoalesce = 1;
  1369. }
  1370. nfs_pgio_data_release(data);
  1371. }
  1372. static enum pnfs_try_status
  1373. pnfs_try_to_write_data(struct nfs_pgio_data *wdata,
  1374. const struct rpc_call_ops *call_ops,
  1375. struct pnfs_layout_segment *lseg,
  1376. int how)
  1377. {
  1378. struct nfs_pgio_header *hdr = wdata->header;
  1379. struct inode *inode = hdr->inode;
  1380. enum pnfs_try_status trypnfs;
  1381. struct nfs_server *nfss = NFS_SERVER(inode);
  1382. hdr->mds_ops = call_ops;
  1383. dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
  1384. inode->i_ino, wdata->args.count, wdata->args.offset, how);
  1385. trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
  1386. if (trypnfs != PNFS_NOT_ATTEMPTED)
  1387. nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
  1388. dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
  1389. return trypnfs;
  1390. }
  1391. static void
  1392. pnfs_do_write(struct nfs_pageio_descriptor *desc,
  1393. struct nfs_pgio_header *hdr, int how)
  1394. {
  1395. struct nfs_pgio_data *data = hdr->data;
  1396. const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
  1397. struct pnfs_layout_segment *lseg = desc->pg_lseg;
  1398. enum pnfs_try_status trypnfs;
  1399. desc->pg_lseg = NULL;
  1400. trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
  1401. if (trypnfs == PNFS_NOT_ATTEMPTED)
  1402. pnfs_write_through_mds(desc, data);
  1403. pnfs_put_lseg(lseg);
  1404. }

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_rw_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_rw_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_rw_header_alloc(desc->pg_rw_ops);
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_write(desc, hdr, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
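
/*
 * Example (editor's sketch, modeled on the in-tree file layout driver):
 * a layout driver can reuse pnfs_generic_pg_writepages() directly as
 * its ->pg_doio hook and only supply its own coalescing callbacks.
 * The my_ld_* callbacks are hypothetical:
 *
 *	static const struct nfs_pageio_ops my_ld_pg_write_ops = {
 *		.pg_init = my_ld_pg_init_write,
 *		.pg_test = my_ld_pg_test,
 *		.pg_doio = pnfs_generic_pg_writepages,
 *	};
 */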

int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, true, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	trace_nfs4_pnfs_read(data, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
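
/*
 * Example (editor's sketch): the read side mirrors the write-side
 * completion above; a hypothetical non-RPC driver callback would
 * substitute pnfs_ld_read_done():
 *
 *	static void my_ld_read_complete(struct nfs_pgio_data *data,
 *					ssize_t status)
 *	{
 *		if (status >= 0)
 *			data->res.count = status;
 *		else
 *			data->header->pnfs_error = status;
 *		pnfs_ld_read_done(data);
 *	}
 */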

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_data *rdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_data *data = hdr->data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_read_through_mds(desc, data);
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_rw_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_rw_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_rw_header_alloc(desc->pg_rw_ops);
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_read(desc, hdr);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * NFS_INO_LAYOUTCOMMITTING acts as a bit lock: it is taken with
 * test_and_set_bit()/wait_on_bit_lock() in pnfs_layoutcommit_inode()
 * and released here.  clear_bit_unlock() plus the barrier ensure the
 * wake-up cannot be reordered before the release.
 */
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_pgio_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* If pnfs_layoutcommit_inode() runs after we drop i_lock but before
	 * the mark_inode_dirty_sync() below, the resulting extra layoutcommit
	 * is a no-op because NFS_INO_LAYOUTCOMMIT will no longer be set. */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
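
/*
 * Example (editor's sketch, modeled on the file layout driver): an
 * RPC-based driver records the need for a LAYOUTCOMMIT from its write
 * completion callback once the data-server reply is known to be good.
 * The callback itself is hypothetical:
 *
 *	static int my_ld_write_done_cb(struct rpc_task *task,
 *				       struct nfs_pgio_data *data)
 *	{
 *		if (task->tk_status >= 0)
 *			pnfs_set_layoutcommit(data);
 *		return 0;
 *	}
 */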

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and either a COMMIT is sent to a data server or WRITEs to a
 * data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
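
/*
 * Example (editor's sketch): callers typically flush a pending
 * LAYOUTCOMMIT when writing back the inode, with "sync" derived from
 * the writeback mode.  A hypothetical write_inode-style caller:
 *
 *	static int my_write_inode(struct inode *inode,
 *				  struct writeback_control *wbc)
 *	{
 *		return pnfs_layoutcommit_inode(inode,
 *				wbc->sync_mode == WB_SYNC_ALL);
 *	}
 */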

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
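
/*
 * Example (editor's sketch): a caller on the open path might attach a
 * threshold structure so the server's mdsthreshold attribute can be
 * decoded into it; kzalloc() above guarantees that unset hints read as
 * zero.  The ctx variable and capability check are assumptions:
 *
 *	if (server->caps & NFS_CAP_MDSTHRESHOLD)
 *		ctx->mdsthreshold = pnfs_mdsthreshold_alloc();
 */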