
/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
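
/*
 * Like find_pnfs_driver_locked(), but takes pnfs_spinlock itself and
 * pins the returned module with try_module_get(); the caller owns the
 * resulting module reference.
 */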
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
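
/*
 * Detach the current layout driver from @nfss: run the driver's
 * clear_layoutdriver hook, drop this mount's MDS count (purging the
 * deviceid cache when it reaches zero), and release the module
 * reference taken in find_pnfs_driver().
 */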
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr(ino, gfp_flags);
}
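
/*
 * Final teardown of a layout header: unlink it from the per-client
 * plh_layouts list under cl_lock, drop the credential taken at
 * allocation time, and hand the memory back to the layout driver.
 */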
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
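
/*
 * Release a reference on the layout header. A pending layoutreturn, if
 * any, is sent first; when the last reference is dropped, the header is
 * detached from the inode under i_lock and freed.
 */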
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
static int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}
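
/*
 * Setting a fail bit pins the header with an extra reference and
 * records plh_retry_timestamp, so that pnfs_layout_io_test_failed()
 * can allow a new LAYOUTGET attempt once PNFS_LAYOUTGET_RETRY_TIMEOUT
 * has passed; clearing the bit drops that reference again.
 */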
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
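
/*
 * pnfs_put_lseg_locked() below runs with inode->i_lock held, so the
 * final free is deferred to a work item; presumably the layout
 * driver's ->free_lseg callback may block, which is not permitted
 * under a spinlock.
 */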
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
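
/*
 * Saturating start + len: a range whose end would wrap past 2^64 is
 * clamped to NFS4_MAX_UINT64, i.e. treated as extending to end of file.
 */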
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
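
/*
 * Drop one reference on @lseg; if it was the last, unlink the lseg from
 * its layout and queue it on @tmp_list so the caller can free it after
 * i_lock is dropped. Returns true if the lseg was moved.
 */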
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			if (seq && pnfs_seqid_is_newer(lseg->pls_seq, seq))
				continue;
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if new LAYOUTGETs are currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   long *timeout, gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return ERR_PTR(-ENOMEM);

	i_size = i_size_read(ino);

	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (range->iomode == IOMODE_READ) {
		if (range->offset >= i_size)
			lgp->args.minlength = 0;
		else if (i_size - range->offset < lgp->args.minlength)
			lgp->args.minlength = i_size - range->offset;
	}
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
}
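
/*
 * Clear the NFS_INO_LAYOUTCOMMIT flag and drop the layoutcommit
 * reference held on each lseg, queueing any lseg whose refcount hits
 * zero on @head for the caller to free.
 */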
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
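
/*
 * Atomically claim the right to send a layoutreturn: returns false if
 * one is already in flight (NFS_LAYOUT_RETURN already set). On success
 * the header is pinned with an extra reference, which is matched in
 * the layoutreturn release path.
 */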
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
{
	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return false;
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	pnfs_get_layout_hdr(lo);
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	return true;
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	nfs4_stateid_copy(&lrp->args.stateid, stateid);
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;

	/* Defer layoutreturn until all lsegs are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;
	}

	return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		stateid.seqid = cpu_to_be32(lo->plh_return_seq);
		iomode = lo->plh_return_iomode;
		send = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	nfs4_stateid_copy(&stateid, &nfsi->layout->plh_stateid);
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	send = pnfs_prepare_layoutreturn(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
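
/*
 * Decide whether "return on close" applies. Returns true, after
 * invalidating the ROC lsegs, only when ROC segments exist and no
 * layoutreturn is being sent; there is no ROC if we hold a delegation,
 * there is open file state, or a bulk recall is in progress.
 */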
bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false, roc = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	/* always send layoutreturn if being marked so */
	if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
				   &lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo);

	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	if (found && !layoutreturn) {
		/* lo ref dropped in pnfs_roc_release() */
		pnfs_get_layout_hdr(lo);
		roc = true;
	}

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
	return roc;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_clear_layoutreturn_waitbit(lo);
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_mark_layout_returned_if_empty(lo);
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
	trace_nfs4_layoutreturn_on_close(ino, 0);
}

void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	u32 current_seqid;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* We might not have grabbed a reference on lo, so we need to
	 * check the NFS_LAYOUT_RETURN bit under i_lock.
	 */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		sleep = true;
	spin_unlock(&ino->i_lock);

	if (sleep)
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);

	return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}
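
/*
 * Default ->do_merge callback used when the layout driver supplies no
 * add_lseg method: never merge segments.
 */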
static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}
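
/*
 * Called with i_lock held; the lock is dropped and retaken around the
 * allocation, so another thread may install a layout header in the
 * meantime. The loser of that race frees its copy and takes a
 * reference on the existing header instead.
 */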
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *			iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset, seq;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo = NULL;
	struct pnfs_layout_segment *lseg = NULL;
	nfs4_stateid stateid;
	long timeout = 0;
	unsigned long giveup = jiffies + rpc_get_timeout(server->client);
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
		goto out_unlock;
	}

	if (!nfs4_valid_open_stateid(ctx->state)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
		goto out_unlock;
	}

	/*
	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
	 * stateid, or it has been invalidated, then we must use the open
	 * stateid.
	 */
	if (lo->plh_stateid.seqid == 0 ||
	    test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		/*
		 * The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}

		first = true;
		do {
			seq = read_seqbegin(&ctx->state->seqlock);
			nfs4_stateid_copy(&stateid, &ctx->state->stateid);
		} while (read_seqretry(&ctx->state->seqlock, seq));
	} else {
		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
						 lseg, PNFS_UPDATE_LAYOUT_RETRY);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ERECALLCONFLICT:
			if (time_after(jiffies, giveup))
				lseg = NULL;
			/* Fallthrough */
		case -EAGAIN:
			pnfs_put_layout_hdr(lo);
			if (first)
				pnfs_clear_first_layoutget(lo);
			if (lseg) {
				trace_pnfs_update_layout(ino, pos, count,
					iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
				goto lookup_again;
			}
			/* Fallthrough */
		default:
			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
				lseg = NULL;
			}
		}
	} else {
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	}

	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
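
/*
 * Sanity-check a layout range returned by the server: the iomode must
 * be READ or RW, the offset must not be the "all ones" sentinel, and a
 * length other than the NFS4_MAX_UINT64 "to end of file" sentinel must
 * be non-zero and must not overflow a u64 when added to the offset.
 */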
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}
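
/*
 * Process a LAYOUTGET reply: ask the layout driver to turn the opaque
 * layout body into a layout segment, validate the reply against the
 * cached layout stateid, and insert the new segment into the layout.
 * Replies that arrive while layoutgets are blocked, or that carry a
 * stale sequence number, are forgotten and -EAGAIN is returned.
 */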
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);
		dprintk("%s: Could not allocate layout: error %ld\n",
			__func__, PTR_ERR(lseg));
		return lseg;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	lseg->pls_seq = be32_to_cpu(res->stateid.seqid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID. Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL, 0);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);
	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}
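
/*
 * Record the iomode and stateid seqid that a pending layoutreturn
 * should cover.  Differing iomodes are widened to IOMODE_ANY, and the
 * newest sequence number wins.
 */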
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode == iomode)
		return;
	if (lo->plh_return_iomode != 0)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
		lo->plh_return_seq = seq;
}

/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(&lseg->pls_range, return_range)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining)
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);

	return remaining;
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);
	bool return_now = false;

	spin_lock(&inode->i_lock);
	pnfs_set_plh_return_info(lo, range.iomode, lseg->pls_seq);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (!pnfs_mark_matching_lsegs_return(lo, &free_me,
					     &range, lseg->pls_seq)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode = lo->plh_return_iomode;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		return_now = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
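
/*
 * Set up a pageio descriptor for a pNFS read: look up (or request) a
 * layout segment covering the request; if none can be obtained, fall
 * back to reading through the MDS.  This is the generic read-side
 * caller of pnfs_update_layout().
 */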
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
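
/*
 * Write-side counterpart of pnfs_generic_pg_init_read(): request an
 * IOMODE_RW segment of @wb_size bytes, falling back to the MDS if no
 * layout segment can be obtained.
 */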
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
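
/*
 * Requeue every request in @hdr for transmission through the MDS,
 * typically after the layout driver failed to write through the DS.
 */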
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				      hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
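
/*
 * Hand the pages in @hdr back to the pageio descriptor and redirect
 * them down the MDS write path.  NFS_IOHDR_REDO guards against redoing
 * the same header twice.
 */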
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
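
/*
 * Flush the coalesced page list: allocate a pgio header, take a
 * reference on the descriptor's layout segment, and hand the I/O to
 * pnfs_do_write(), which falls back to the MDS if the layout driver
 * does not attempt the write.
 */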
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
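
/*
 * Read-side counterpart of pnfs_write_through_mds(): requeue the pages
 * in @hdr and redirect them down the MDS read path.
 */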
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		nfs_pageio_init_read(&pgio, hdr->inode, false,
				     hdr->completion_ops);
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
		pnfs_read_through_mds(desc, hdr);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
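
/*
 * Record that data through @end_pos was written via @lseg so that a
 * later LAYOUTCOMMIT can report the last write byte (plh_lwb) to the
 * server.  Takes an lseg reference that nfs4_layoutcommit_release()
 * drops.
 */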
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		      loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
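
/*
 * Give the layout driver a chance to clean up after a LAYOUTCOMMIT,
 * then drop the lseg references taken in pnfs_set_layoutcommit().
 */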
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
						 NFS_INO_LAYOUTCOMMITTING,
						 nfs_wait_bit_killable,
						 TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
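
/*
 * Generic fsync helper for pNFS: any sync, data or not, forces a
 * synchronous LAYOUTCOMMIT, so @datasync is ignored.
 */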
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
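
/*
 * Allocate the nfs4_threshold that pnfs_within_mdsthreshold() consults.
 * The thresholds themselves are filled in from the mdsthreshold
 * attribute returned by the server at OPEN time.
 */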
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
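/*
 * Send a LAYOUTSTATS report for @inode if pNFS is enabled, the layout
 * driver and server both support it, and no report is already in
 * flight (NFS_INO_LAYOUTSTATS serializes reporters).
 */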
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif
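
/*
 * Reporting interval, in seconds, for layoutstats; 0 leaves the choice
 * to the layout driver.  (The consumers of this knob live in the layout
 * drivers, not in this file.)
 */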
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);