fs/locks.c

  1. /*
  2. * linux/fs/locks.c
  3. *
  4. * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
  5. * Doug Evans (dje@spiff.uucp), August 07, 1992
  6. *
  7. * Deadlock detection added.
  8. * FIXME: one thing isn't handled yet:
  9. * - mandatory locks (requires lots of changes elsewhere)
  10. * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
  11. *
  12. * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
  13. * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
  14. *
  15. * Converted file_lock_table to a linked list from an array, which eliminates
  16. * the limits on how many active file locks are open.
  17. * Chad Page (pageone@netcom.com), November 27, 1994
  18. *
  19. * Removed dependency on file descriptors. dup()'ed file descriptors now
  20. * get the same locks as the original file descriptors, and a close() on
  21. * any file descriptor removes ALL the locks on the file for the current
  22. * process. Since locks still depend on the process id, locks are inherited
  23. * after an exec() but not after a fork(). This agrees with POSIX, and both
  24. * BSD and SVR4 practice.
  25. * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
  26. *
  27. * Scrapped free list which is redundant now that we allocate locks
  28. * dynamically with kmalloc()/kfree().
  29. * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  30. *
  31. * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
  32. *
  33. * FL_POSIX locks are created with calls to fcntl() and lockf() through the
  34. * fcntl() system call. They have the semantics described above.
  35. *
  36. * FL_FLOCK locks are created with calls to flock(), through the flock()
  37. * system call, which is new. Old C libraries implement flock() via fcntl()
  38. * and will continue to use the old, broken implementation.
  39. *
  40. * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
  41. * with a file pointer (filp). As a result they can be shared by a parent
  42. * process and its children after a fork(). They are removed when the last
  43. * file descriptor referring to the file pointer is closed (unless explicitly
  44. * unlocked).
  45. *
  46. * FL_FLOCK locks never deadlock; an existing lock is always removed before
  47. * upgrading from shared to exclusive (or vice versa). When this happens
  48. * any processes blocked by the current lock are woken up and allowed to
  49. * run before the new lock is applied.
  50. * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
  51. *
  52. * Removed some race conditions in flock_lock_file(), marked other possible
  53. * races. Just grep for FIXME to see them.
  54. * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
  55. *
  56. * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
  57. * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
  58. * once we've checked for blocking and deadlocking.
  59. * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
  60. *
  61. * Initial implementation of mandatory locks. SunOS turned out to be
  62. * a rotten model, so I implemented the "obvious" semantics.
  63. * See 'Documentation/filesystems/mandatory-locking.txt' for details.
  64. * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
  65. *
  66. * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
  67. * check if a file has mandatory locks, used by mmap(), open() and creat() to
  68. * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
  69. * Manual, Section 2.
  70. * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
  71. *
  72. * Tidied up block list handling. Added '/proc/locks' interface.
  73. * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
  74. *
  75. * Fixed deadlock condition for pathological code that mixes calls to
  76. * flock() and fcntl().
  77. * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
  78. *
  79. * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
  80. * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
  81. * guarantee sensible behaviour in the case where file system modules might
  82. * be compiled with different options than the kernel itself.
  83. * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  84. *
  85. * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
  86. * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
  87. * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
  88. *
  89. * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
  90. * locks. Changed process synchronisation to avoid dereferencing locks that
  91. * have already been freed.
  92. * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
  93. *
  94. * Made the block list a circular list to minimise searching in the list.
  95. * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
  96. *
  97. * Made mandatory locking a mount option. Default is not to allow mandatory
  98. * locking.
  99. * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
  100. *
  101. * Some adaptations for NFS support.
  102. * Olaf Kirch (okir@monad.swb.de), Dec 1996.
  103. *
  104. * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
  105. * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
  106. *
  107. * Use slab allocator instead of kmalloc/kfree.
  108. * Use generic list implementation from <linux/list.h>.
  109. * Sped up posix_locks_deadlock by only considering blocked locks.
  110. * Matthew Wilcox <willy@debian.org>, March, 2000.
  111. *
  112. * Leases and LOCK_MAND
  113. * Matthew Wilcox <willy@debian.org>, June, 2000.
  114. * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
  115. */
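/*
 * An illustrative, hypothetical userspace sketch (not part of this file) of
 * the two lock personalities described in the header above: fcntl() record
 * locks (FL_POSIX) are owned by the process and may cover a byte range,
 * while flock() locks (FL_FLOCK) follow the open file description and
 * always cover the whole file.
 *
 *      int posix_example(int fd)
 *      {
 *              struct flock fl = {
 *                      .l_type   = F_WRLCK,    // exclusive record lock
 *                      .l_whence = SEEK_SET,
 *                      .l_start  = 0,
 *                      .l_len    = 100,        // bytes 0..99 only
 *              };
 *              return fcntl(fd, F_SETLKW, &fl);        // sleep on conflict
 *      }
 *
 *      int flock_example(int fd)
 *      {
 *              return flock(fd, LOCK_EX);      // whole-file exclusive lock
 *      }
 */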
  116. #include <linux/capability.h>
  117. #include <linux/file.h>
  118. #include <linux/fdtable.h>
  119. #include <linux/fs.h>
  120. #include <linux/init.h>
  121. #include <linux/security.h>
  122. #include <linux/slab.h>
  123. #include <linux/syscalls.h>
  124. #include <linux/time.h>
  125. #include <linux/rcupdate.h>
  126. #include <linux/pid_namespace.h>
  127. #include <linux/hashtable.h>
  128. #include <linux/percpu.h>
  129. #define CREATE_TRACE_POINTS
  130. #include <trace/events/filelock.h>
  131. #include <linux/uaccess.h>
  132. #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
  133. #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
  134. #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
  135. #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
  136. #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
  137. static inline bool is_remote_lock(struct file *filp)
  138. {
  139. return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
  140. }
  141. static bool lease_breaking(struct file_lock *fl)
  142. {
  143. return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
  144. }
  145. static int target_leasetype(struct file_lock *fl)
  146. {
  147. if (fl->fl_flags & FL_UNLOCK_PENDING)
  148. return F_UNLCK;
  149. if (fl->fl_flags & FL_DOWNGRADE_PENDING)
  150. return F_RDLCK;
  151. return fl->fl_type;
  152. }
  153. int leases_enable = 1;
  154. int lease_break_time = 45;
  155. /*
  156. * The global file_lock_list is only used for displaying /proc/locks, so we
  157. * keep a list on each CPU, with each list protected by its own spinlock.
  158. * Global serialization is done using file_rwsem.
  159. *
  160. * Note that alterations to the list also require that the relevant flc_lock is
  161. * held.
  162. */
  163. struct file_lock_list_struct {
  164. spinlock_t lock;
  165. struct hlist_head hlist;
  166. };
  167. static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
  168. DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
  169. /*
  170. * The blocked_hash is used to find POSIX lock loops for deadlock detection.
  171. * It is protected by blocked_lock_lock.
  172. *
  173. * We hash locks by lockowner in order to optimize searching for the lock a
  174. * particular lockowner is waiting on.
  175. *
  176. * FIXME: make this value scale via some heuristic? We generally will want more
  177. * buckets when we have more lockowners holding locks, but that's a little
  178. * difficult to determine without knowing what the workload will look like.
  179. */
  180. #define BLOCKED_HASH_BITS 7
  181. static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  182. /*
  183. * This lock protects the blocked_hash. Generally, if you're accessing it, you
  184. * want to be holding this lock.
  185. *
  186. * In addition, it also protects the fl->fl_block list, and the fl->fl_next
  187. * pointer for file_lock structures that are acting as lock requests (in
  188. * contrast to those that are acting as records of acquired locks).
  189. *
  190. * Note that when we acquire this lock in order to change the above fields,
  191. * we often hold the flc_lock as well. In certain cases, when reading the fields
  192. * protected by this lock, we can skip acquiring it iff we already hold the
  193. * flc_lock.
  194. *
  195. * In particular, adding an entry to the fl_block list requires that you hold
  196. * both the flc_lock and the blocked_lock_lock (acquired in that order).
  197. * Deleting an entry from the list, however, only requires the blocked_lock_lock.
  198. */
  199. static DEFINE_SPINLOCK(blocked_lock_lock);
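/*
 * A minimal sketch of the acquisition order described above, assuming a
 * waiter is being queued on a blocker (this mirrors locks_insert_block()
 * below; shown only to make the ordering concrete):
 *
 *      spin_lock(&ctx->flc_lock);              // per-inode lock first
 *      spin_lock(&blocked_lock_lock);          // then the global lock
 *      __locks_insert_block(blocker, waiter);  // add to blocker's fl_block
 *      spin_unlock(&blocked_lock_lock);
 *      spin_unlock(&ctx->flc_lock);
 */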
  200. static struct kmem_cache *flctx_cache __read_mostly;
  201. static struct kmem_cache *filelock_cache __read_mostly;
  202. static struct file_lock_context *
  203. locks_get_lock_context(struct inode *inode, int type)
  204. {
  205. struct file_lock_context *ctx;
  206. /* paired with cmpxchg() below */
  207. ctx = smp_load_acquire(&inode->i_flctx);
  208. if (likely(ctx) || type == F_UNLCK)
  209. goto out;
  210. ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
  211. if (!ctx)
  212. goto out;
  213. spin_lock_init(&ctx->flc_lock);
  214. INIT_LIST_HEAD(&ctx->flc_flock);
  215. INIT_LIST_HEAD(&ctx->flc_posix);
  216. INIT_LIST_HEAD(&ctx->flc_lease);
  217. /*
  218. * Assign the pointer if it's not already assigned. If it is, then
  219. * free the context we just allocated.
  220. */
  221. if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
  222. kmem_cache_free(flctx_cache, ctx);
  223. ctx = smp_load_acquire(&inode->i_flctx);
  224. }
  225. out:
  226. trace_locks_get_lock_context(inode, type, ctx);
  227. return ctx;
  228. }
  229. static void
  230. locks_dump_ctx_list(struct list_head *list, char *list_type)
  231. {
  232. struct file_lock *fl;
  233. list_for_each_entry(fl, list, fl_list) {
  234. pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
  235. }
  236. }
  237. static void
  238. locks_check_ctx_lists(struct inode *inode)
  239. {
  240. struct file_lock_context *ctx = inode->i_flctx;
  241. if (unlikely(!list_empty(&ctx->flc_flock) ||
  242. !list_empty(&ctx->flc_posix) ||
  243. !list_empty(&ctx->flc_lease))) {
  244. pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
  245. MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
  246. inode->i_ino);
  247. locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
  248. locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
  249. locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
  250. }
  251. }
  252. static void
  253. locks_check_ctx_file_list(struct file *filp, struct list_head *list,
  254. char *list_type)
  255. {
  256. struct file_lock *fl;
  257. struct inode *inode = locks_inode(filp);
  258. list_for_each_entry(fl, list, fl_list)
  259. if (fl->fl_file == filp)
  260. pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
  261. " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
  262. list_type, MAJOR(inode->i_sb->s_dev),
  263. MINOR(inode->i_sb->s_dev), inode->i_ino,
  264. fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
  265. }
  266. void
  267. locks_free_lock_context(struct inode *inode)
  268. {
  269. struct file_lock_context *ctx = inode->i_flctx;
  270. if (unlikely(ctx)) {
  271. locks_check_ctx_lists(inode);
  272. kmem_cache_free(flctx_cache, ctx);
  273. }
  274. }
  275. static void locks_init_lock_heads(struct file_lock *fl)
  276. {
  277. INIT_HLIST_NODE(&fl->fl_link);
  278. INIT_LIST_HEAD(&fl->fl_list);
  279. INIT_LIST_HEAD(&fl->fl_block);
  280. init_waitqueue_head(&fl->fl_wait);
  281. }
  282. /* Allocate an empty lock structure. */
  283. struct file_lock *locks_alloc_lock(void)
  284. {
  285. struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
  286. if (fl)
  287. locks_init_lock_heads(fl);
  288. return fl;
  289. }
  290. EXPORT_SYMBOL_GPL(locks_alloc_lock);
  291. void locks_release_private(struct file_lock *fl)
  292. {
  293. if (fl->fl_ops) {
  294. if (fl->fl_ops->fl_release_private)
  295. fl->fl_ops->fl_release_private(fl);
  296. fl->fl_ops = NULL;
  297. }
  298. if (fl->fl_lmops) {
  299. if (fl->fl_lmops->lm_put_owner) {
  300. fl->fl_lmops->lm_put_owner(fl->fl_owner);
  301. fl->fl_owner = NULL;
  302. }
  303. fl->fl_lmops = NULL;
  304. }
  305. }
  306. EXPORT_SYMBOL_GPL(locks_release_private);
  307. /* Free a lock which is not in use. */
  308. void locks_free_lock(struct file_lock *fl)
  309. {
  310. BUG_ON(waitqueue_active(&fl->fl_wait));
  311. BUG_ON(!list_empty(&fl->fl_list));
  312. BUG_ON(!list_empty(&fl->fl_block));
  313. BUG_ON(!hlist_unhashed(&fl->fl_link));
  314. locks_release_private(fl);
  315. kmem_cache_free(filelock_cache, fl);
  316. }
  317. EXPORT_SYMBOL(locks_free_lock);
  318. static void
  319. locks_dispose_list(struct list_head *dispose)
  320. {
  321. struct file_lock *fl;
  322. while (!list_empty(dispose)) {
  323. fl = list_first_entry(dispose, struct file_lock, fl_list);
  324. list_del_init(&fl->fl_list);
  325. locks_free_lock(fl);
  326. }
  327. }
  328. void locks_init_lock(struct file_lock *fl)
  329. {
  330. memset(fl, 0, sizeof(struct file_lock));
  331. locks_init_lock_heads(fl);
  332. }
  333. EXPORT_SYMBOL(locks_init_lock);
  334. /*
  335. * Initialize a new lock from an existing file_lock structure.
  336. */
  337. void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
  338. {
  339. new->fl_owner = fl->fl_owner;
  340. new->fl_pid = fl->fl_pid;
  341. new->fl_file = NULL;
  342. new->fl_flags = fl->fl_flags;
  343. new->fl_type = fl->fl_type;
  344. new->fl_start = fl->fl_start;
  345. new->fl_end = fl->fl_end;
  346. new->fl_lmops = fl->fl_lmops;
  347. new->fl_ops = NULL;
  348. if (fl->fl_lmops) {
  349. if (fl->fl_lmops->lm_get_owner)
  350. fl->fl_lmops->lm_get_owner(fl->fl_owner);
  351. }
  352. }
  353. EXPORT_SYMBOL(locks_copy_conflock);
  354. void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
  355. {
  356. /* "new" must be a freshly-initialized lock */
  357. WARN_ON_ONCE(new->fl_ops);
  358. locks_copy_conflock(new, fl);
  359. new->fl_file = fl->fl_file;
  360. new->fl_ops = fl->fl_ops;
  361. if (fl->fl_ops) {
  362. if (fl->fl_ops->fl_copy_lock)
  363. fl->fl_ops->fl_copy_lock(new, fl);
  364. }
  365. }
  366. EXPORT_SYMBOL(locks_copy_lock);
  367. static inline int flock_translate_cmd(int cmd) {
  368. if (cmd & LOCK_MAND)
  369. return cmd & (LOCK_MAND | LOCK_RW);
  370. switch (cmd) {
  371. case LOCK_SH:
  372. return F_RDLCK;
  373. case LOCK_EX:
  374. return F_WRLCK;
  375. case LOCK_UN:
  376. return F_UNLCK;
  377. }
  378. return -EINVAL;
  379. }
  380. /* Fill in a file_lock structure with an appropriate FLOCK lock. */
  381. static struct file_lock *
  382. flock_make_lock(struct file *filp, unsigned int cmd)
  383. {
  384. struct file_lock *fl;
  385. int type = flock_translate_cmd(cmd);
  386. if (type < 0)
  387. return ERR_PTR(type);
  388. fl = locks_alloc_lock();
  389. if (fl == NULL)
  390. return ERR_PTR(-ENOMEM);
  391. fl->fl_file = filp;
  392. fl->fl_owner = filp;
  393. fl->fl_pid = current->tgid;
  394. fl->fl_flags = FL_FLOCK;
  395. fl->fl_type = type;
  396. fl->fl_end = OFFSET_MAX;
  397. return fl;
  398. }
  399. static int assign_type(struct file_lock *fl, long type)
  400. {
  401. switch (type) {
  402. case F_RDLCK:
  403. case F_WRLCK:
  404. case F_UNLCK:
  405. fl->fl_type = type;
  406. break;
  407. default:
  408. return -EINVAL;
  409. }
  410. return 0;
  411. }
  412. static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
  413. struct flock64 *l)
  414. {
  415. switch (l->l_whence) {
  416. case SEEK_SET:
  417. fl->fl_start = 0;
  418. break;
  419. case SEEK_CUR:
  420. fl->fl_start = filp->f_pos;
  421. break;
  422. case SEEK_END:
  423. fl->fl_start = i_size_read(file_inode(filp));
  424. break;
  425. default:
  426. return -EINVAL;
  427. }
  428. if (l->l_start > OFFSET_MAX - fl->fl_start)
  429. return -EOVERFLOW;
  430. fl->fl_start += l->l_start;
  431. if (fl->fl_start < 0)
  432. return -EINVAL;
  433. /* POSIX-1996 leaves the case l->l_len < 0 undefined;
  434. POSIX-2001 defines it. */
  435. if (l->l_len > 0) {
  436. if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
  437. return -EOVERFLOW;
  438. fl->fl_end = fl->fl_start + l->l_len - 1;
  439. } else if (l->l_len < 0) {
  440. if (fl->fl_start + l->l_len < 0)
  441. return -EINVAL;
  442. fl->fl_end = fl->fl_start - 1;
  443. fl->fl_start += l->l_len;
  444. } else
  445. fl->fl_end = OFFSET_MAX;
  446. fl->fl_owner = current->files;
  447. fl->fl_pid = current->tgid;
  448. fl->fl_file = filp;
  449. fl->fl_flags = FL_POSIX;
  450. fl->fl_ops = NULL;
  451. fl->fl_lmops = NULL;
  452. return assign_type(fl, l->l_type);
  453. }
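/*
 * A worked example of the POSIX-2001 negative l_len case handled above
 * (values are hypothetical):
 *
 *      struct flock64 l = {
 *              .l_type   = F_RDLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 100,
 *              .l_len    = -10,        // the ten bytes *before* offset 100
 *      };
 *
 * yields fl_start == 90 and fl_end == 99, i.e. bytes 90..99. With
 * l_len == 0 the lock instead extends to end-of-file (fl_end == OFFSET_MAX).
 */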
  454. /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
  455. * style lock.
  456. */
  457. static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
  458. struct flock *l)
  459. {
  460. struct flock64 ll = {
  461. .l_type = l->l_type,
  462. .l_whence = l->l_whence,
  463. .l_start = l->l_start,
  464. .l_len = l->l_len,
  465. };
  466. return flock64_to_posix_lock(filp, fl, &ll);
  467. }
  468. /* default lease lock manager operations */
  469. static bool
  470. lease_break_callback(struct file_lock *fl)
  471. {
  472. kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
  473. return false;
  474. }
  475. static void
  476. lease_setup(struct file_lock *fl, void **priv)
  477. {
  478. struct file *filp = fl->fl_file;
  479. struct fasync_struct *fa = *priv;
  480. /*
  481. * fasync_insert_entry() returns the old entry if any. If there was no
  482. * old entry, then it used "priv" and inserted it into the fasync list.
  483. * Clear the pointer to indicate that it shouldn't be freed.
  484. */
  485. if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
  486. *priv = NULL;
  487. __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
  488. }
  489. static const struct lock_manager_operations lease_manager_ops = {
  490. .lm_break = lease_break_callback,
  491. .lm_change = lease_modify,
  492. .lm_setup = lease_setup,
  493. };
  494. /*
  495. * Initialize a lease, use the default lock manager operations
  496. */
  497. static int lease_init(struct file *filp, long type, struct file_lock *fl)
  498. {
  499. if (assign_type(fl, type) != 0)
  500. return -EINVAL;
  501. fl->fl_owner = filp;
  502. fl->fl_pid = current->tgid;
  503. fl->fl_file = filp;
  504. fl->fl_flags = FL_LEASE;
  505. fl->fl_start = 0;
  506. fl->fl_end = OFFSET_MAX;
  507. fl->fl_ops = NULL;
  508. fl->fl_lmops = &lease_manager_ops;
  509. return 0;
  510. }
  511. /* Allocate a file_lock initialised to this type of lease */
  512. static struct file_lock *lease_alloc(struct file *filp, long type)
  513. {
  514. struct file_lock *fl = locks_alloc_lock();
  515. int error = -ENOMEM;
  516. if (fl == NULL)
  517. return ERR_PTR(error);
  518. error = lease_init(filp, type, fl);
  519. if (error) {
  520. locks_free_lock(fl);
  521. return ERR_PTR(error);
  522. }
  523. return fl;
  524. }
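/*
 * A hypothetical userspace sketch of the lease API these helpers back
 * (F_SETLEASE is Linux-specific and needs _GNU_SOURCE):
 *
 *      static volatile sig_atomic_t lease_broken;
 *      static void on_sigio(int sig) { lease_broken = 1; }
 *
 *      int take_write_lease(int fd)
 *      {
 *              signal(SIGIO, on_sigio);        // SIGIO is the default break signal
 *              if (fcntl(fd, F_SETLEASE, F_WRLCK) == -1)
 *                      return -1;
 *              // When someone else open()s or truncate()s the file, the kernel
 *              // sends SIGIO and the holder has lease_break_time seconds
 *              // (45 by default, see /proc/sys/fs/lease-break-time) to call
 *              // fcntl(fd, F_SETLEASE, F_UNLCK) before the lease is revoked.
 *              return 0;
 *      }
 */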
  525. /* Check if two locks overlap each other.
  526. */
  527. static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
  528. {
  529. return ((fl1->fl_end >= fl2->fl_start) &&
  530. (fl2->fl_end >= fl1->fl_start));
  531. }
  532. /*
  533. * Check whether two locks have the same owner.
  534. */
  535. static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
  536. {
  537. if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
  538. return fl2->fl_lmops == fl1->fl_lmops &&
  539. fl1->fl_lmops->lm_compare_owner(fl1, fl2);
  540. return fl1->fl_owner == fl2->fl_owner;
  541. }
  542. /* Must be called with the flc_lock held! */
  543. static void locks_insert_global_locks(struct file_lock *fl)
  544. {
  545. struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
  546. percpu_rwsem_assert_held(&file_rwsem);
  547. spin_lock(&fll->lock);
  548. fl->fl_link_cpu = smp_processor_id();
  549. hlist_add_head(&fl->fl_link, &fll->hlist);
  550. spin_unlock(&fll->lock);
  551. }
  552. /* Must be called with the flc_lock held! */
  553. static void locks_delete_global_locks(struct file_lock *fl)
  554. {
  555. struct file_lock_list_struct *fll;
  556. percpu_rwsem_assert_held(&file_rwsem);
  557. /*
  558. * Avoid taking lock if already unhashed. This is safe since this check
  559. * is done while holding the flc_lock, and new insertions into the list
  560. * also require that it be held.
  561. */
  562. if (hlist_unhashed(&fl->fl_link))
  563. return;
  564. fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
  565. spin_lock(&fll->lock);
  566. hlist_del_init(&fl->fl_link);
  567. spin_unlock(&fll->lock);
  568. }
  569. static unsigned long
  570. posix_owner_key(struct file_lock *fl)
  571. {
  572. if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
  573. return fl->fl_lmops->lm_owner_key(fl);
  574. return (unsigned long)fl->fl_owner;
  575. }
  576. static void locks_insert_global_blocked(struct file_lock *waiter)
  577. {
  578. lockdep_assert_held(&blocked_lock_lock);
  579. hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
  580. }
  581. static void locks_delete_global_blocked(struct file_lock *waiter)
  582. {
  583. lockdep_assert_held(&blocked_lock_lock);
  584. hash_del(&waiter->fl_link);
  585. }
  586. /* Remove waiter from blocker's block list.
  587. * When blocker ends up pointing to itself then the list is empty.
  588. *
  589. * Must be called with blocked_lock_lock held.
  590. */
  591. static void __locks_delete_block(struct file_lock *waiter)
  592. {
  593. locks_delete_global_blocked(waiter);
  594. list_del_init(&waiter->fl_block);
  595. waiter->fl_next = NULL;
  596. }
  597. static void locks_delete_block(struct file_lock *waiter)
  598. {
  599. spin_lock(&blocked_lock_lock);
  600. __locks_delete_block(waiter);
  601. spin_unlock(&blocked_lock_lock);
  602. }
  603. /* Insert waiter into blocker's block list.
  604. * We use a circular list so that processes can be easily woken up in
  605. * the order they blocked. The documentation doesn't require this but
  606. * it seems like the reasonable thing to do.
  607. *
  608. * Must be called with both the flc_lock and blocked_lock_lock held. The
  609. * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
  610. * that the flc_lock is also held on insertions we can avoid taking the
  611. * blocked_lock_lock in some cases when we see that the fl_block list is empty.
  612. */
  613. static void __locks_insert_block(struct file_lock *blocker,
  614. struct file_lock *waiter)
  615. {
  616. BUG_ON(!list_empty(&waiter->fl_block));
  617. waiter->fl_next = blocker;
  618. list_add_tail(&waiter->fl_block, &blocker->fl_block);
  619. if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
  620. locks_insert_global_blocked(waiter);
  621. }
  622. /* Must be called with flc_lock held. */
  623. static void locks_insert_block(struct file_lock *blocker,
  624. struct file_lock *waiter)
  625. {
  626. spin_lock(&blocked_lock_lock);
  627. __locks_insert_block(blocker, waiter);
  628. spin_unlock(&blocked_lock_lock);
  629. }
  630. /*
  631. * Wake up processes blocked waiting for blocker.
  632. *
  633. * Must be called with the inode->flc_lock held!
  634. */
  635. static void locks_wake_up_blocks(struct file_lock *blocker)
  636. {
  637. /*
  638. * Avoid taking global lock if list is empty. This is safe since new
  639. * blocked requests are only added to the list under the flc_lock, and
  640. * the flc_lock is always held here. Note that removal from the fl_block
  641. * list does not require the flc_lock, so we must recheck list_empty()
  642. * after acquiring the blocked_lock_lock.
  643. */
  644. if (list_empty(&blocker->fl_block))
  645. return;
  646. spin_lock(&blocked_lock_lock);
  647. while (!list_empty(&blocker->fl_block)) {
  648. struct file_lock *waiter;
  649. waiter = list_first_entry(&blocker->fl_block,
  650. struct file_lock, fl_block);
  651. __locks_delete_block(waiter);
  652. if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
  653. waiter->fl_lmops->lm_notify(waiter);
  654. else
  655. wake_up(&waiter->fl_wait);
  656. }
  657. spin_unlock(&blocked_lock_lock);
  658. }
  659. static void
  660. locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
  661. {
  662. list_add_tail(&fl->fl_list, before);
  663. locks_insert_global_locks(fl);
  664. }
  665. static void
  666. locks_unlink_lock_ctx(struct file_lock *fl)
  667. {
  668. locks_delete_global_locks(fl);
  669. list_del_init(&fl->fl_list);
  670. locks_wake_up_blocks(fl);
  671. }
  672. static void
  673. locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
  674. {
  675. locks_unlink_lock_ctx(fl);
  676. if (dispose)
  677. list_add(&fl->fl_list, dispose);
  678. else
  679. locks_free_lock(fl);
  680. }
  681. /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
  682. * checks for shared/exclusive status of overlapping locks.
  683. */
  684. static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
  685. {
  686. if (sys_fl->fl_type == F_WRLCK)
  687. return 1;
  688. if (caller_fl->fl_type == F_WRLCK)
  689. return 1;
  690. return 0;
  691. }
  692. /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
  693. * checking before calling the locks_conflict().
  694. */
  695. static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
  696. {
  697. /* POSIX locks owned by the same process do not conflict with
  698. * each other.
  699. */
  700. if (posix_same_owner(caller_fl, sys_fl))
  701. return (0);
  702. /* Check whether they overlap */
  703. if (!locks_overlap(caller_fl, sys_fl))
  704. return 0;
  705. return (locks_conflict(caller_fl, sys_fl));
  706. }
  707. /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
  708. * checking before calling the locks_conflict().
  709. */
  710. static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
  711. {
  712. /* FLOCK locks referring to the same filp do not conflict with
  713. * each other.
  714. */
  715. if (caller_fl->fl_file == sys_fl->fl_file)
  716. return (0);
  717. if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
  718. return 0;
  719. return (locks_conflict(caller_fl, sys_fl));
  720. }
  721. void
  722. posix_test_lock(struct file *filp, struct file_lock *fl)
  723. {
  724. struct file_lock *cfl;
  725. struct file_lock_context *ctx;
  726. struct inode *inode = locks_inode(filp);
  727. ctx = smp_load_acquire(&inode->i_flctx);
  728. if (!ctx || list_empty_careful(&ctx->flc_posix)) {
  729. fl->fl_type = F_UNLCK;
  730. return;
  731. }
  732. spin_lock(&ctx->flc_lock);
  733. list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
  734. if (posix_locks_conflict(fl, cfl)) {
  735. locks_copy_conflock(fl, cfl);
  736. goto out;
  737. }
  738. }
  739. fl->fl_type = F_UNLCK;
  740. out:
  741. spin_unlock(&ctx->flc_lock);
  742. return;
  743. }
  744. EXPORT_SYMBOL(posix_test_lock);
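/*
 * posix_test_lock() is what typically services the fcntl(F_GETLK) query.
 * A minimal, hypothetical userspace sketch:
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,    // "could I take a write lock here?"
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,          // to end-of-file
 *      };
 *      if (fcntl(fd, F_GETLK, &fl) == 0) {
 *              if (fl.l_type == F_UNLCK)
 *                      ;       // no conflicting lock was found
 *              else
 *                      ;       // fl now describes a conflicting lock,
 *                              // including the owner's pid in fl.l_pid
 *      }
 */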
  745. /*
  746. * Deadlock detection:
  747. *
  748. * We attempt to detect deadlocks that are due purely to posix file
  749. * locks.
  750. *
  751. * We assume that a task can be waiting for at most one lock at a time.
  752. * So for any acquired lock, the process holding that lock may be
  753. * waiting on at most one other lock. That lock in turn may be held by
  754. * someone waiting for at most one other lock. Given a requested lock
  755. * caller_fl which is about to wait for a conflicting lock block_fl, we
  756. * follow this chain of waiters to ensure we are not about to create a
  757. * cycle.
  758. *
  759. * Since we do this before we ever put a process to sleep on a lock, we
  760. * are ensured that there is never a cycle; that is what guarantees that
  761. * the while() loop in posix_locks_deadlock() eventually completes.
  762. *
  763. * Note: the above assumption may not be true when handling lock
  764. * requests from a broken NFS client. It may also fail in the presence
  765. * of tasks (such as posix threads) sharing the same open file table.
  766. * To handle those cases, we just bail out after a few iterations.
  767. *
  768. * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
  769. * Because the owner is not even nominally tied to a thread of
  770. * execution, the deadlock detection below can't reasonably work well. Just
  771. * skip it for those.
  772. *
  773. * In principle, we could do a more limited deadlock detection on FL_OFDLCK
  774. * locks that just checks for the case where two tasks are attempting to
  775. * upgrade from read to write locks on the same inode.
  776. */
  777. #define MAX_DEADLK_ITERATIONS 10
  778. /* Find a lock that the owner of the given block_fl is blocking on. */
  779. static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
  780. {
  781. struct file_lock *fl;
  782. hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
  783. if (posix_same_owner(fl, block_fl))
  784. return fl->fl_next;
  785. }
  786. return NULL;
  787. }
  788. /* Must be called with the blocked_lock_lock held! */
  789. static int posix_locks_deadlock(struct file_lock *caller_fl,
  790. struct file_lock *block_fl)
  791. {
  792. int i = 0;
  793. lockdep_assert_held(&blocked_lock_lock);
  794. /*
  795. * This deadlock detector can't reasonably detect deadlocks with
  796. * FL_OFDLCK locks, since they aren't owned by a process, per se.
  797. */
  798. if (IS_OFDLCK(caller_fl))
  799. return 0;
  800. while ((block_fl = what_owner_is_waiting_for(block_fl))) {
  801. if (i++ > MAX_DEADLK_ITERATIONS)
  802. return 0;
  803. if (posix_same_owner(caller_fl, block_fl))
  804. return 1;
  805. }
  806. return 0;
  807. }
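/*
 * An illustrative deadlock that the check above is meant to catch
 * (hypothetical userspace, lock_range() standing in for a blocking
 * fcntl(F_SETLKW) on the given byte range):
 *
 *      // Process A                    // Process B
 *      lock_range(fd, 0, 100);         lock_range(fd, 100, 100);
 *      lock_range(fd, 100, 100);       lock_range(fd, 0, 100);
 *
 * Whichever of the second requests arrives last would close the cycle of
 * waiters, so posix_locks_deadlock() finds the caller's owner while walking
 * the chain and that fcntl() fails with EDEADLK instead of sleeping forever.
 */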
  808. /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
  809. * after any leases, but before any posix locks.
  810. *
  811. * Note that if called with an FL_EXISTS argument, the caller may determine
  812. * whether or not a lock was successfully freed by testing the return
  813. * value for -ENOENT.
  814. */
  815. static int flock_lock_inode(struct inode *inode, struct file_lock *request)
  816. {
  817. struct file_lock *new_fl = NULL;
  818. struct file_lock *fl;
  819. struct file_lock_context *ctx;
  820. int error = 0;
  821. bool found = false;
  822. LIST_HEAD(dispose);
  823. ctx = locks_get_lock_context(inode, request->fl_type);
  824. if (!ctx) {
  825. if (request->fl_type != F_UNLCK)
  826. return -ENOMEM;
  827. return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
  828. }
  829. if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
  830. new_fl = locks_alloc_lock();
  831. if (!new_fl)
  832. return -ENOMEM;
  833. }
  834. percpu_down_read_preempt_disable(&file_rwsem);
  835. spin_lock(&ctx->flc_lock);
  836. if (request->fl_flags & FL_ACCESS)
  837. goto find_conflict;
  838. list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
  839. if (request->fl_file != fl->fl_file)
  840. continue;
  841. if (request->fl_type == fl->fl_type)
  842. goto out;
  843. found = true;
  844. locks_delete_lock_ctx(fl, &dispose);
  845. break;
  846. }
  847. if (request->fl_type == F_UNLCK) {
  848. if ((request->fl_flags & FL_EXISTS) && !found)
  849. error = -ENOENT;
  850. goto out;
  851. }
  852. find_conflict:
  853. list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
  854. if (!flock_locks_conflict(request, fl))
  855. continue;
  856. error = -EAGAIN;
  857. if (!(request->fl_flags & FL_SLEEP))
  858. goto out;
  859. error = FILE_LOCK_DEFERRED;
  860. locks_insert_block(fl, request);
  861. goto out;
  862. }
  863. if (request->fl_flags & FL_ACCESS)
  864. goto out;
  865. locks_copy_lock(new_fl, request);
  866. locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
  867. new_fl = NULL;
  868. error = 0;
  869. out:
  870. spin_unlock(&ctx->flc_lock);
  871. percpu_up_read_preempt_enable(&file_rwsem);
  872. if (new_fl)
  873. locks_free_lock(new_fl);
  874. locks_dispose_list(&dispose);
  875. return error;
  876. }
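/*
 * A hypothetical userspace sketch of the upgrade behaviour implemented
 * above: the existing FLOCK lock on the same filp is removed before the
 * new one is applied, so an upgrade is not atomic.
 *
 *      flock(fd, LOCK_SH);     // shared lock
 *      ...
 *      flock(fd, LOCK_EX);     // upgrade: the old lock is dropped first, so
 *                              // another process may slip in and take the
 *                              // lock before the exclusive one is granted
 *      flock(fd, LOCK_UN);     // release
 */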
  877. static int posix_lock_inode(struct inode *inode, struct file_lock *request,
  878. struct file_lock *conflock)
  879. {
  880. struct file_lock *fl, *tmp;
  881. struct file_lock *new_fl = NULL;
  882. struct file_lock *new_fl2 = NULL;
  883. struct file_lock *left = NULL;
  884. struct file_lock *right = NULL;
  885. struct file_lock_context *ctx;
  886. int error;
  887. bool added = false;
  888. LIST_HEAD(dispose);
  889. ctx = locks_get_lock_context(inode, request->fl_type);
  890. if (!ctx)
  891. return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
  892. /*
  893. * We may need two file_lock structures for this operation,
  894. * so we get them in advance to avoid races.
  895. *
  896. * In some cases we can be sure, that no new locks will be needed
  897. */
  898. if (!(request->fl_flags & FL_ACCESS) &&
  899. (request->fl_type != F_UNLCK ||
  900. request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
  901. new_fl = locks_alloc_lock();
  902. new_fl2 = locks_alloc_lock();
  903. }
  904. percpu_down_read_preempt_disable(&file_rwsem);
  905. spin_lock(&ctx->flc_lock);
  906. /*
  907. * New lock request. Walk all POSIX locks and look for conflicts. If
  908. * there are any, either return error or put the request on the
  909. * blocker's list of waiters and the global blocked_hash.
  910. */
  911. if (request->fl_type != F_UNLCK) {
  912. list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
  913. if (!posix_locks_conflict(request, fl))
  914. continue;
  915. if (conflock)
  916. locks_copy_conflock(conflock, fl);
  917. error = -EAGAIN;
  918. if (!(request->fl_flags & FL_SLEEP))
  919. goto out;
  920. /*
  921. * Deadlock detection and insertion into the blocked
  922. * locks list must be done while holding the same lock!
  923. */
  924. error = -EDEADLK;
  925. spin_lock(&blocked_lock_lock);
  926. if (likely(!posix_locks_deadlock(request, fl))) {
  927. error = FILE_LOCK_DEFERRED;
  928. __locks_insert_block(fl, request);
  929. }
  930. spin_unlock(&blocked_lock_lock);
  931. goto out;
  932. }
  933. }
  934. /* If we're just looking for a conflict, we're done. */
  935. error = 0;
  936. if (request->fl_flags & FL_ACCESS)
  937. goto out;
  938. /* Find the first old lock with the same owner as the new lock */
  939. list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
  940. if (posix_same_owner(request, fl))
  941. break;
  942. }
  943. /* Process locks with this owner. */
  944. list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
  945. if (!posix_same_owner(request, fl))
  946. break;
  947. /* Detect adjacent or overlapping regions (if same lock type) */
  948. if (request->fl_type == fl->fl_type) {
  949. /* In all comparisons of start vs end, use
  950. * "start - 1" rather than "end + 1". If end
  951. * is OFFSET_MAX, end + 1 will become negative.
  952. */
  953. if (fl->fl_end < request->fl_start - 1)
  954. continue;
  955. /* If the next lock in the list has entirely bigger
  956. * addresses than the new one, insert the lock here.
  957. */
  958. if (fl->fl_start - 1 > request->fl_end)
  959. break;
  960. /* If we come here, the new and old lock are of the
  961. * same type and adjacent or overlapping. Make one
  962. * lock yielding from the lower start address of both
  963. * locks to the higher end address.
  964. */
  965. if (fl->fl_start > request->fl_start)
  966. fl->fl_start = request->fl_start;
  967. else
  968. request->fl_start = fl->fl_start;
  969. if (fl->fl_end < request->fl_end)
  970. fl->fl_end = request->fl_end;
  971. else
  972. request->fl_end = fl->fl_end;
  973. if (added) {
  974. locks_delete_lock_ctx(fl, &dispose);
  975. continue;
  976. }
  977. request = fl;
  978. added = true;
  979. } else {
  980. /* Processing for different lock types is a bit
  981. * more complex.
  982. */
  983. if (fl->fl_end < request->fl_start)
  984. continue;
  985. if (fl->fl_start > request->fl_end)
  986. break;
  987. if (request->fl_type == F_UNLCK)
  988. added = true;
  989. if (fl->fl_start < request->fl_start)
  990. left = fl;
  991. /* If the next lock in the list has a higher end
  992. * address than the new one, insert the new one here.
  993. */
  994. if (fl->fl_end > request->fl_end) {
  995. right = fl;
  996. break;
  997. }
  998. if (fl->fl_start >= request->fl_start) {
  999. /* The new lock completely replaces an old
  1000. * one (This may happen several times).
  1001. */
  1002. if (added) {
  1003. locks_delete_lock_ctx(fl, &dispose);
  1004. continue;
  1005. }
  1006. /*
  1007. * Replace the old lock with new_fl, and
  1008. * remove the old one. It's safe to do the
  1009. * insert here since we know that we won't be
  1010. * using new_fl later, and that the lock is
  1011. * just replacing an existing lock.
  1012. */
  1013. error = -ENOLCK;
  1014. if (!new_fl)
  1015. goto out;
  1016. locks_copy_lock(new_fl, request);
  1017. request = new_fl;
  1018. new_fl = NULL;
  1019. locks_insert_lock_ctx(request, &fl->fl_list);
  1020. locks_delete_lock_ctx(fl, &dispose);
  1021. added = true;
  1022. }
  1023. }
  1024. }
  1025. /*
  1026. * The above code only modifies existing locks in case of merging or
  1027. * replacing. If new lock(s) need to be inserted all modifications are
  1028. * done below this, so it's safe yet to bail out.
  1029. */
  1030. error = -ENOLCK; /* "no luck" */
  1031. if (right && left == right && !new_fl2)
  1032. goto out;
  1033. error = 0;
  1034. if (!added) {
  1035. if (request->fl_type == F_UNLCK) {
  1036. if (request->fl_flags & FL_EXISTS)
  1037. error = -ENOENT;
  1038. goto out;
  1039. }
  1040. if (!new_fl) {
  1041. error = -ENOLCK;
  1042. goto out;
  1043. }
  1044. locks_copy_lock(new_fl, request);
  1045. locks_insert_lock_ctx(new_fl, &fl->fl_list);
  1046. fl = new_fl;
  1047. new_fl = NULL;
  1048. }
  1049. if (right) {
  1050. if (left == right) {
  1051. /* The new lock breaks the old one in two pieces,
  1052. * so we have to use the second new lock.
  1053. */
  1054. left = new_fl2;
  1055. new_fl2 = NULL;
  1056. locks_copy_lock(left, right);
  1057. locks_insert_lock_ctx(left, &fl->fl_list);
  1058. }
  1059. right->fl_start = request->fl_end + 1;
  1060. locks_wake_up_blocks(right);
  1061. }
  1062. if (left) {
  1063. left->fl_end = request->fl_start - 1;
  1064. locks_wake_up_blocks(left);
  1065. }
  1066. out:
  1067. spin_unlock(&ctx->flc_lock);
  1068. percpu_up_read_preempt_enable(&file_rwsem);
  1069. /*
  1070. * Free any unused locks.
  1071. */
  1072. if (new_fl)
  1073. locks_free_lock(new_fl);
  1074. if (new_fl2)
  1075. locks_free_lock(new_fl2);
  1076. locks_dispose_list(&dispose);
  1077. trace_posix_lock_inode(inode, request, error);
  1078. return error;
  1079. }
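/*
 * A worked example of the splitting case above, and the reason two
 * file_lock structures are preallocated. Suppose one owner holds a write
 * lock on bytes 0..99 and then unlocks bytes 40..59:
 *
 *      held:     [0 .............................. 99]   F_WRLCK
 *      request:             [40 ..... 59]                 F_UNLCK
 *
 * The existing lock is trimmed to become the "left" piece (fl_end = 39) and
 * new_fl2 becomes the "right" piece (fl_start = 60). Because left == right
 * in this case, the request fails with -ENOLCK if new_fl2 could not be
 * allocated.
 */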
  1080. /**
  1081. * posix_lock_file - Apply a POSIX-style lock to a file
  1082. * @filp: The file to apply the lock to
  1083. * @fl: The lock to be applied
  1084. * @conflock: Place to return a copy of the conflicting lock, if found.
  1085. *
  1086. * Add a POSIX style lock to a file.
  1087. * We merge adjacent & overlapping locks whenever possible.
  1088. * POSIX locks are sorted by owner task, then by starting address
  1089. *
  1090. * Note that if called with an FL_EXISTS argument, the caller may determine
  1091. * whether or not a lock was successfully freed by testing the return
  1092. * value for -ENOENT.
  1093. */
  1094. int posix_lock_file(struct file *filp, struct file_lock *fl,
  1095. struct file_lock *conflock)
  1096. {
  1097. return posix_lock_inode(locks_inode(filp), fl, conflock);
  1098. }
  1099. EXPORT_SYMBOL(posix_lock_file);
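/*
 * The same path also applies Linux "open file description" locks
 * (FL_OFDLCK), which are owned by the struct file rather than the process.
 * A hypothetical userspace sketch (the F_OFD_* commands need _GNU_SOURCE):
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,
 *              .l_pid    = 0,          // must be zero for OFD locks
 *      };
 *      fcntl(fd, F_OFD_SETLKW, &fl);   // held until the last descriptor for
 *                                      // this open file description is closed
 */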
  1100. /**
  1101. * posix_lock_inode_wait - Apply a POSIX-style lock to a file
  1102. * @inode: inode of file to which lock request should be applied
  1103. * @fl: The lock to be applied
  1104. *
  1105. * Apply a POSIX style lock request to an inode.
  1106. */
  1107. static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
  1108. {
  1109. int error;
  1110. might_sleep();
  1111. for (;;) {
  1112. error = posix_lock_inode(inode, fl, NULL);
  1113. if (error != FILE_LOCK_DEFERRED)
  1114. break;
  1115. error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
  1116. if (!error)
  1117. continue;
  1118. locks_delete_block(fl);
  1119. break;
  1120. }
  1121. return error;
  1122. }
  1123. #ifdef CONFIG_MANDATORY_FILE_LOCKING
  1124. /**
  1125. * locks_mandatory_locked - Check for an active lock
  1126. * @file: the file to check
  1127. *
  1128. * Searches the inode's list of locks to find any POSIX locks which conflict.
  1129. * This function is called from locks_verify_locked() only.
  1130. */
  1131. int locks_mandatory_locked(struct file *file)
  1132. {
  1133. int ret;
  1134. struct inode *inode = locks_inode(file);
  1135. struct file_lock_context *ctx;
  1136. struct file_lock *fl;
  1137. ctx = smp_load_acquire(&inode->i_flctx);
  1138. if (!ctx || list_empty_careful(&ctx->flc_posix))
  1139. return 0;
  1140. /*
  1141. * Search the lock list for this inode for any POSIX locks.
  1142. */
  1143. spin_lock(&ctx->flc_lock);
  1144. ret = 0;
  1145. list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
  1146. if (fl->fl_owner != current->files &&
  1147. fl->fl_owner != file) {
  1148. ret = -EAGAIN;
  1149. break;
  1150. }
  1151. }
  1152. spin_unlock(&ctx->flc_lock);
  1153. return ret;
  1154. }
  1155. /**
  1156. * locks_mandatory_area - Check for a conflicting lock
  1157. * @inode: the file to check
  1158. * @filp: how the file was opened (if it was)
  1159. * @start: first byte in the file to check
  1160. * @end: last byte in the file to check
  1161. * @type: %F_WRLCK for a write lock, else %F_RDLCK
  1162. *
  1163. * Searches the inode's list of locks to find any POSIX locks which conflict.
  1164. */
  1165. int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
  1166. loff_t end, unsigned char type)
  1167. {
  1168. struct file_lock fl;
  1169. int error;
  1170. bool sleep = false;
  1171. locks_init_lock(&fl);
  1172. fl.fl_pid = current->tgid;
  1173. fl.fl_file = filp;
  1174. fl.fl_flags = FL_POSIX | FL_ACCESS;
  1175. if (filp && !(filp->f_flags & O_NONBLOCK))
  1176. sleep = true;
  1177. fl.fl_type = type;
  1178. fl.fl_start = start;
  1179. fl.fl_end = end;
  1180. for (;;) {
  1181. if (filp) {
  1182. fl.fl_owner = filp;
  1183. fl.fl_flags &= ~FL_SLEEP;
  1184. error = posix_lock_inode(inode, &fl, NULL);
  1185. if (!error)
  1186. break;
  1187. }
  1188. if (sleep)
  1189. fl.fl_flags |= FL_SLEEP;
  1190. fl.fl_owner = current->files;
  1191. error = posix_lock_inode(inode, &fl, NULL);
  1192. if (error != FILE_LOCK_DEFERRED)
  1193. break;
  1194. error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
  1195. if (!error) {
  1196. /*
  1197. * If we've been sleeping someone might have
  1198. * changed the permissions behind our back.
  1199. */
  1200. if (__mandatory_lock(inode))
  1201. continue;
  1202. }
  1203. locks_delete_block(&fl);
  1204. break;
  1205. }
  1206. return error;
  1207. }
  1208. EXPORT_SYMBOL(locks_mandatory_area);
  1209. #endif /* CONFIG_MANDATORY_FILE_LOCKING */
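/*
 * Mandatory locking (the code above) only takes effect when the filesystem
 * is mounted with the "mand" option and the file's setgid bit is set with
 * group-execute cleared; see Documentation/filesystems/mandatory-locking.txt.
 * A hypothetical sketch of marking a file from userspace:
 *
 *      struct stat st;
 *      fstat(fd, &st);
 *      fchmod(fd, (st.st_mode & ~S_IXGRP) | S_ISGID);
 *      // Conflicting read()/write() calls by other processes now block
 *      // (or fail with EAGAIN under O_NONBLOCK) while a fcntl() lock is held.
 */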
  1210. static void lease_clear_pending(struct file_lock *fl, int arg)
  1211. {
  1212. switch (arg) {
  1213. case F_UNLCK:
  1214. fl->fl_flags &= ~FL_UNLOCK_PENDING;
  1215. /* fall through: */
  1216. case F_RDLCK:
  1217. fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
  1218. }
  1219. }
  1220. /* We already had a lease on this file; just change its type */
  1221. int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
  1222. {
  1223. int error = assign_type(fl, arg);
  1224. if (error)
  1225. return error;
  1226. lease_clear_pending(fl, arg);
  1227. locks_wake_up_blocks(fl);
  1228. if (arg == F_UNLCK) {
  1229. struct file *filp = fl->fl_file;
  1230. f_delown(filp);
  1231. filp->f_owner.signum = 0;
  1232. fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
  1233. if (fl->fl_fasync != NULL) {
  1234. printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
  1235. fl->fl_fasync = NULL;
  1236. }
  1237. locks_delete_lock_ctx(fl, dispose);
  1238. }
  1239. return 0;
  1240. }
  1241. EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
        if (!then)
                /* 0 is a special value meaning "this never expires": */
                return false;
        return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
        struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl, *tmp;

        lockdep_assert_held(&ctx->flc_lock);

        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                trace_time_out_leases(inode, fl);
                if (past_time(fl->fl_downgrade_time))
                        lease_modify(fl, F_RDLCK, dispose);
                if (past_time(fl->fl_break_time))
                        lease_modify(fl, F_UNLCK, dispose);
        }
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
                return false;
        if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
                return false;
        return locks_conflict(breaker, lease);
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
        struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl;

        lockdep_assert_held(&ctx->flc_lock);

        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (leases_conflict(fl, breaker))
                        return true;
        }
        return false;
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file.  Leases are broken on
 * a call to open() or truncate().  This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
        int error = 0;
        struct file_lock_context *ctx;
        struct file_lock *new_fl, *fl, *tmp;
        unsigned long break_time;
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
        LIST_HEAD(dispose);

        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
        if (IS_ERR(new_fl))
                return PTR_ERR(new_fl);
        new_fl->fl_flags = type;

        /* typically we will check that ctx is non-NULL before calling */
        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx) {
                WARN_ON_ONCE(1);
                return error;
        }

        percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);

        time_out_leases(inode, &dispose);

        if (!any_leases_conflict(inode, new_fl))
                goto out;

        break_time = 0;
        if (lease_break_time > 0) {
                break_time = jiffies + lease_break_time * HZ;
                if (break_time == 0)
                        break_time++;   /* so that 0 means no break time */
        }

        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                if (!leases_conflict(fl, new_fl))
                        continue;
                if (want_write) {
                        if (fl->fl_flags & FL_UNLOCK_PENDING)
                                continue;
                        fl->fl_flags |= FL_UNLOCK_PENDING;
                        fl->fl_break_time = break_time;
                } else {
                        if (lease_breaking(fl))
                                continue;
                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
                        fl->fl_downgrade_time = break_time;
                }
                if (fl->fl_lmops->lm_break(fl))
                        locks_delete_lock_ctx(fl, &dispose);
        }

        if (list_empty(&ctx->flc_lease))
                goto out;

        if (mode & O_NONBLOCK) {
                trace_break_lease_noblock(inode, new_fl);
                error = -EWOULDBLOCK;
                goto out;
        }

restart:
        fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
        break_time = fl->fl_break_time;
        if (break_time != 0)
                break_time -= jiffies;
        if (break_time == 0)
                break_time++;
        locks_insert_block(fl, new_fl);
        trace_break_lease_block(inode, new_fl);
        spin_unlock(&ctx->flc_lock);
        percpu_up_read_preempt_enable(&file_rwsem);

        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
                                                 !new_fl->fl_next, break_time);

        percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        trace_break_lease_unblock(inode, new_fl);
        locks_delete_block(new_fl);
        if (error >= 0) {
                /*
                 * Wait for the next conflicting lease that has not been
                 * broken yet
                 */
                if (error == 0)
                        time_out_leases(inode, &dispose);
                if (any_leases_conflict(inode, new_fl))
                        goto restart;
                error = 0;
        }
out:
        spin_unlock(&ctx->flc_lock);
        percpu_up_read_preempt_enable(&file_rwsem);
        locks_dispose_list(&dispose);
        locks_free_lock(new_fl);
        return error;
}
EXPORT_SYMBOL(__break_lease);
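
/*
 * Illustrative sketch (not part of the original source): a VFS open or
 * truncate path would typically break leases through the break_lease()
 * wrapper, which only calls __break_lease() when a lease might be present:
 *
 *	error = break_lease(inode, O_WRONLY);	// break all leases for write
 *	if (error)
 *		return error;	// e.g. -EWOULDBLOCK for an O_NONBLOCK open
 */
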
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time: pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
        bool has_lease = false;
        struct file_lock_context *ctx;
        struct file_lock *fl;

        ctx = smp_load_acquire(&inode->i_flctx);
        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
                spin_lock(&ctx->flc_lock);
                fl = list_first_entry_or_null(&ctx->flc_lease,
                                              struct file_lock, fl_list);
                if (fl && (fl->fl_type == F_WRLCK))
                        has_lease = true;
                spin_unlock(&ctx->flc_lock);
        }

        if (has_lease)
                *time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
        struct file_lock *fl;
        struct inode *inode = locks_inode(filp);
        struct file_lock_context *ctx;
        int type = F_UNLCK;
        LIST_HEAD(dispose);

        ctx = smp_load_acquire(&inode->i_flctx);
        if (ctx && !list_empty_careful(&ctx->flc_lease)) {
                percpu_down_read_preempt_disable(&file_rwsem);
                spin_lock(&ctx->flc_lock);
                time_out_leases(inode, &dispose);
                list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                        if (fl->fl_file != filp)
                                continue;
                        type = target_leasetype(fl);
                        break;
                }
                spin_unlock(&ctx->flc_lock);
                percpu_up_read_preempt_enable(&file_rwsem);

                locks_dispose_list(&dispose);
        }
        return type;
}
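
/*
 * Illustrative userspace sketch (not part of the original source): the
 * return values documented above are what fcntl(F_GETLEASE) reports:
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	if (type == F_WRLCK)
 *		printf("exclusive lease held\n");
 *	else if (type == F_RDLCK)
 *		printf("shared lease held (or break to shared pending)\n");
 *	else if (type == F_UNLCK)
 *		printf("no lease held (or removal pending)\n");
 */
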
/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry: dentry to check
 * @arg: type of lease that we're trying to acquire
 * @flags: current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
        int ret = 0;
        struct inode *inode = dentry->d_inode;

        if (flags & FL_LAYOUT)
                return 0;

        if ((arg == F_RDLCK) &&
            (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
                return -EAGAIN;

        if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
            (atomic_read(&inode->i_count) > 1)))
                ret = -EAGAIN;

        return ret;
}

static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
        struct file_lock *fl, *my_fl = NULL, *lease;
        struct dentry *dentry = filp->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct file_lock_context *ctx;
        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
        int error;
        LIST_HEAD(dispose);

        lease = *flp;
        trace_generic_add_lease(inode, lease);

        /* Note that arg is never F_UNLCK here */
        ctx = locks_get_lock_context(inode, arg);
        if (!ctx)
                return -ENOMEM;

        /*
         * In the delegation case we need mutual exclusion with
         * a number of operations that take the i_mutex.  We trylock
         * because delegations are an optional optimization, and if
         * there's some chance of a conflict--we'd rather not
         * bother, maybe that's a sign this just isn't a good file to
         * hand out a delegation on.
         */
        if (is_deleg && !inode_trylock(inode))
                return -EAGAIN;

        if (is_deleg && arg == F_WRLCK) {
                /* Write delegations are not currently supported: */
                inode_unlock(inode);
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        time_out_leases(inode, &dispose);
        error = check_conflicting_open(dentry, arg, lease->fl_flags);
        if (error)
                goto out;

        /*
         * At this point, we know that if there is an exclusive
         * lease on this file, then we hold it on this filp
         * (otherwise our open of this file would have blocked).
         * And if we are trying to acquire an exclusive lease,
         * then the file is not open by anyone (including us)
         * except for this filp.
         */
        error = -EAGAIN;
        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (fl->fl_file == filp &&
                    fl->fl_owner == lease->fl_owner) {
                        my_fl = fl;
                        continue;
                }

                /*
                 * No exclusive leases if someone else has a lease on
                 * this file:
                 */
                if (arg == F_WRLCK)
                        goto out;
                /*
                 * Modifying our existing lease is OK, but no getting a
                 * new lease if someone else is opening for write:
                 */
                if (fl->fl_flags & FL_UNLOCK_PENDING)
                        goto out;
        }

        if (my_fl != NULL) {
                lease = my_fl;
                error = lease->fl_lmops->lm_change(lease, arg, &dispose);
                if (error)
                        goto out;
                goto out_setup;
        }

        error = -EINVAL;
        if (!leases_enable)
                goto out;

        locks_insert_lock_ctx(lease, &ctx->flc_lease);
        /*
         * The check in break_lease() is lockless. It's possible for another
         * open to race in after we did the earlier check for a conflicting
         * open but before the lease was inserted. Check again for a
         * conflicting open and cancel the lease if there is one.
         *
         * We also add a barrier here to ensure that the insertion of the lock
         * precedes these checks.
         */
        smp_mb();
        error = check_conflicting_open(dentry, arg, lease->fl_flags);
        if (error) {
                locks_unlink_lock_ctx(lease);
                goto out;
        }

out_setup:
        if (lease->fl_lmops->lm_setup)
                lease->fl_lmops->lm_setup(lease, priv);
out:
        spin_unlock(&ctx->flc_lock);
        percpu_up_read_preempt_enable(&file_rwsem);
        locks_dispose_list(&dispose);
        if (is_deleg)
                inode_unlock(inode);
        if (!error && !my_fl)
                *flp = NULL;
        return error;
}

static int generic_delete_lease(struct file *filp, void *owner)
{
        int error = -EAGAIN;
        struct file_lock *fl, *victim = NULL;
        struct inode *inode = locks_inode(filp);
        struct file_lock_context *ctx;
        LIST_HEAD(dispose);

        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx) {
                trace_generic_delete_lease(inode, NULL);
                return error;
        }

        percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (fl->fl_file == filp &&
                    fl->fl_owner == owner) {
                        victim = fl;
                        break;
                }
        }
        trace_generic_delete_lease(inode, victim);
        if (victim)
                error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
        percpu_up_read_preempt_enable(&file_rwsem);
        locks_dispose_list(&dispose);
        return error;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 * @priv: private data for lm_setup (may be NULL if lm_setup
 *	doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
                     void **priv)
{
        struct inode *inode = locks_inode(filp);
        int error;

        if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
                return -EACCES;
        if (!S_ISREG(inode->i_mode))
                return -EINVAL;
        error = security_file_lock(filp, arg);
        if (error)
                return error;

        switch (arg) {
        case F_UNLCK:
                return generic_delete_lease(filp, *priv);
        case F_RDLCK:
        case F_WRLCK:
                if (!(*flp)->fl_lmops->lm_break) {
                        WARN_ON_ONCE(1);
                        return -ENOLCK;
                }

                return generic_add_lease(filp, arg, flp, priv);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL(generic_setlease);

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use when adding a lease
 * @priv: private info for lm_setup when adding a lease (may be
 *	NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
        if (filp->f_op->setlease && is_remote_lock(filp))
                return filp->f_op->setlease(filp, arg, lease, priv);
        else
                return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
        struct file_lock *fl;
        struct fasync_struct *new;
        int error;

        fl = lease_alloc(filp, arg);
        if (IS_ERR(fl))
                return PTR_ERR(fl);

        new = fasync_alloc();
        if (!new) {
                locks_free_lock(fl);
                return -ENOMEM;
        }
        new->fa_fd = fd;

        error = vfs_setlease(filp, arg, &fl, (void **)&new);
        if (fl)
                locks_free_lock(fl);
        if (new)
                fasync_free(new);
        return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
        if (arg == F_UNLCK)
                return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
        return do_fcntl_add_lease(fd, filp, arg);
}
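
/*
 * Illustrative userspace sketch (not part of the original source): taking a
 * read lease and arranging for a signal (SIGIO by default, or one chosen via
 * F_SETSIG) when the lease is broken:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// optional: pick the signal
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		// drop the lease
 */
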
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
        int error;

        might_sleep();
        for (;;) {
                error = flock_lock_inode(inode, fl);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
                if (!error)
                        continue;

                locks_delete_block(fl);
                break;
        }
        return error;
}

/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
        int res = 0;

        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
        case FL_POSIX:
                res = posix_lock_inode_wait(inode, fl);
                break;
        case FL_FLOCK:
                res = flock_lock_inode_wait(inode, fl);
                break;
        default:
                BUG();
        }
        return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);

/**
 * sys_flock - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * - %LOCK_SH -- a shared lock.
 * - %LOCK_EX -- an exclusive lock.
 * - %LOCK_UN -- remove an existing lock.
 * - %LOCK_MAND -- a 'mandatory' flock.
 *   This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
        struct fd f = fdget(fd);
        struct file_lock *lock;
        int can_sleep, unlock;
        int error;

        error = -EBADF;
        if (!f.file)
                goto out;

        can_sleep = !(cmd & LOCK_NB);
        cmd &= ~LOCK_NB;
        unlock = (cmd == LOCK_UN);

        if (!unlock && !(cmd & LOCK_MAND) &&
            !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
                goto out_putf;

        lock = flock_make_lock(f.file, cmd);
        if (IS_ERR(lock)) {
                error = PTR_ERR(lock);
                goto out_putf;
        }

        if (can_sleep)
                lock->fl_flags |= FL_SLEEP;

        error = security_file_lock(f.file, lock->fl_type);
        if (error)
                goto out_free;

        if (f.file->f_op->flock && is_remote_lock(f.file))
                error = f.file->f_op->flock(f.file,
                                            (can_sleep) ? F_SETLKW : F_SETLK,
                                            lock);
        else
                error = locks_lock_file_wait(f.file, lock);

 out_free:
        locks_free_lock(lock);

 out_putf:
        fdput(f);
 out:
        return error;
}
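
/*
 * Illustrative userspace sketch (not part of the original source): taking an
 * exclusive flock, failing immediately instead of sleeping if it is held:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		if (errno == EWOULDBLOCK)
 *			fprintf(stderr, "already locked\n");
 *	}
 *	...
 *	flock(fd, LOCK_UN);
 */
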
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
        if (filp->f_op->lock && is_remote_lock(filp))
                return filp->f_op->lock(filp, F_GETLK, fl);
        posix_test_lock(filp, fl);
        return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

/**
 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock whose fl_pid should be translated
 * @ns: The namespace into which the pid should be translated
 *
 * Used to translate a fl_pid into a namespace virtual pid number
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
        pid_t vnr;
        struct pid *pid;

        if (IS_OFDLCK(fl))
                return -1;
        if (IS_REMOTELCK(fl))
                return fl->fl_pid;

        rcu_read_lock();
        pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
        vnr = pid_nr_ns(pid, ns);
        rcu_read_unlock();
        return vnr;
}

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
        flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
        /*
         * Make sure we can represent the posix lock via
         * legacy 32bit flock.
         */
        if (fl->fl_start > OFFT_OFFSET_MAX)
                return -EOVERFLOW;
        if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
                return -EOVERFLOW;
#endif
        flock->l_start = fl->fl_start;
        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
                fl->fl_end - fl->fl_start + 1;
        flock->l_whence = 0;
        flock->l_type = fl->fl_type;
        return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
        flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
        flock->l_start = fl->fl_start;
        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
                fl->fl_end - fl->fl_start + 1;
        flock->l_whence = 0;
        flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
        struct file_lock *fl;
        int error;

        fl = locks_alloc_lock();
        if (fl == NULL)
                return -ENOMEM;
        error = -EINVAL;
        if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
                goto out;

        error = flock_to_posix_lock(filp, fl, flock);
        if (error)
                goto out;

        if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_GETLK;
                fl->fl_flags |= FL_OFDLCK;
                fl->fl_owner = filp;
        }

        error = vfs_test_lock(filp, fl);
        if (error)
                goto out;

        flock->l_type = fl->fl_type;
        if (fl->fl_type != F_UNLCK) {
                error = posix_lock_to_flock(flock, fl);
                if (error)
                        goto out;
        }
out:
        locks_free_lock(fl);
        return error;
}
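
/*
 * Illustrative userspace sketch (not part of the original source): probing
 * for a conflicting lock over the whole file with F_GETLK; on return, l_type
 * is F_UNLCK if the lock could be placed, otherwise it describes a blocker:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "to EOF"
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflicting lock held by pid %d\n", (int)fl.l_pid);
 */
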
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 *
 * If the request is for a non-blocking lock, the file system should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out, the callback routine will return
 * a nonzero return code and the file system should release the lock. The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock, so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 *
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
        if (filp->f_op->lock && is_remote_lock(filp))
                return filp->f_op->lock(filp, cmd, fl);
        else
                return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
                             struct file_lock *fl)
{
        int error;

        error = security_file_lock(filp, fl->fl_type);
        if (error)
                return error;

        for (;;) {
                error = vfs_lock_file(filp, cmd, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
                if (!error)
                        continue;

                locks_delete_block(fl);
                break;
        }

        return error;
}

/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
        switch (fl->fl_type) {
        case F_RDLCK:
                if (!(fl->fl_file->f_mode & FMODE_READ))
                        return -EBADF;
                break;
        case F_WRLCK:
                if (!(fl->fl_file->f_mode & FMODE_WRITE))
                        return -EBADF;
        }
        return 0;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
                struct flock *flock)
{
        struct file_lock *file_lock = locks_alloc_lock();
        struct inode *inode = locks_inode(filp);
        struct file *f;
        int error;

        if (file_lock == NULL)
                return -ENOLCK;

        /* Don't allow mandatory locks on files that may be memory mapped
         * and shared.
         */
        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
                error = -EAGAIN;
                goto out;
        }

        error = flock_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;

        error = check_fmode_for_setlk(file_lock);
        if (error)
                goto out;

        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
        case F_OFD_SETLK:
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_SETLK;
                file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = filp;
                break;
        case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_SETLKW;
                file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = filp;
                /* Fallthrough */
        case F_SETLKW:
                file_lock->fl_flags |= FL_SLEEP;
        }

        error = do_lock_file_wait(filp, cmd, file_lock);

        /*
         * Attempt to detect a close/fcntl race and recover by releasing the
         * lock that was just acquired. There is no need to do that when we're
         * unlocking though, or for OFD locks.
         */
        if (!error && file_lock->fl_type != F_UNLCK &&
            !(file_lock->fl_flags & FL_OFDLCK)) {
                /*
                 * We need that spin_lock here - it prevents reordering between
                 * update of i_flctx->flc_posix and check for it done in
                 * close(). rcu_read_lock() wouldn't do.
                 */
                spin_lock(&current->files->file_lock);
                f = fcheck(fd);
                spin_unlock(&current->files->file_lock);
                if (f != filp) {
                        file_lock->fl_type = F_UNLCK;
                        error = do_lock_file_wait(filp, cmd, file_lock);
                        WARN_ON_ONCE(error);
                        error = -EBADF;
                }
        }
out:
        trace_fcntl_setlk(inode, file_lock, error);
        locks_free_lock(file_lock);
        return error;
}
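
/*
 * Illustrative userspace sketch (not part of the original source): taking an
 * open-file-description (OFD) write lock; as enforced above, l_pid must be
 * zero for the F_OFD_* commands (which also require _GNU_SOURCE):
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,		// required for F_OFD_SETLK
 *	};
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 */
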
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
        struct file_lock *fl;
        int error;

        fl = locks_alloc_lock();
        if (fl == NULL)
                return -ENOMEM;

        error = -EINVAL;
        if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
                goto out;

        error = flock64_to_posix_lock(filp, fl, flock);
        if (error)
                goto out;

        if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_GETLK64;
                fl->fl_flags |= FL_OFDLCK;
                fl->fl_owner = filp;
        }

        error = vfs_test_lock(filp, fl);
        if (error)
                goto out;

        flock->l_type = fl->fl_type;
        if (fl->fl_type != F_UNLCK)
                posix_lock_to_flock64(flock, fl);

out:
        locks_free_lock(fl);
        return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
                  struct flock64 *flock)
{
        struct file_lock *file_lock = locks_alloc_lock();
        struct inode *inode = locks_inode(filp);
        struct file *f;
        int error;

        if (file_lock == NULL)
                return -ENOLCK;

        /* Don't allow mandatory locks on files that may be memory mapped
         * and shared.
         */
        if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
                error = -EAGAIN;
                goto out;
        }

        error = flock64_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;

        error = check_fmode_for_setlk(file_lock);
        if (error)
                goto out;

        /*
         * If the cmd is requesting file-private locks, then set the
         * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
        case F_OFD_SETLK:
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_SETLK64;
                file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = filp;
                break;
        case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock->l_pid != 0)
                        goto out;

                cmd = F_SETLKW64;
                file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = filp;
                /* Fallthrough */
        case F_SETLKW64:
                file_lock->fl_flags |= FL_SLEEP;
        }

        error = do_lock_file_wait(filp, cmd, file_lock);

        /*
         * Attempt to detect a close/fcntl race and recover by releasing the
         * lock that was just acquired. There is no need to do that when we're
         * unlocking though, or for OFD locks.
         */
        if (!error && file_lock->fl_type != F_UNLCK &&
            !(file_lock->fl_flags & FL_OFDLCK)) {
                /*
                 * We need that spin_lock here - it prevents reordering between
                 * update of i_flctx->flc_posix and check for it done in
                 * close(). rcu_read_lock() wouldn't do.
                 */
                spin_lock(&current->files->file_lock);
                f = fcheck(fd);
                spin_unlock(&current->files->file_lock);
                if (f != filp) {
                        file_lock->fl_type = F_UNLCK;
                        error = do_lock_file_wait(filp, cmd, file_lock);
                        WARN_ON_ONCE(error);
                        error = -EBADF;
                }
        }
out:
        locks_free_lock(file_lock);
        return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
        int error;
        struct inode *inode = locks_inode(filp);
        struct file_lock lock;
        struct file_lock_context *ctx;

        /*
         * If there are no locks held on this file, we don't need to call
         * posix_lock_file().  Another process could be setting a lock on this
         * file at the same time, but we wouldn't remove that lock anyway.
         */
        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx || list_empty(&ctx->flc_posix))
                return;

        lock.fl_type = F_UNLCK;
        lock.fl_flags = FL_POSIX | FL_CLOSE;
        lock.fl_start = 0;
        lock.fl_end = OFFSET_MAX;
        lock.fl_owner = owner;
        lock.fl_pid = current->tgid;
        lock.fl_file = filp;
        lock.fl_ops = NULL;
        lock.fl_lmops = NULL;

        error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

        if (lock.fl_ops && lock.fl_ops->fl_release_private)
                lock.fl_ops->fl_release_private(&lock);
        trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);

/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
        struct file_lock fl = {
                .fl_owner = filp,
                .fl_pid = current->tgid,
                .fl_file = filp,
                .fl_flags = FL_FLOCK | FL_CLOSE,
                .fl_type = F_UNLCK,
                .fl_end = OFFSET_MAX,
        };
        struct inode *inode = locks_inode(filp);

        if (list_empty(&flctx->flc_flock))
                return;

        if (filp->f_op->flock && is_remote_lock(filp))
                filp->f_op->flock(filp, F_SETLKW, &fl);
        else
                flock_lock_inode(inode, &fl);

        if (fl.fl_ops && fl.fl_ops->fl_release_private)
                fl.fl_ops->fl_release_private(&fl);
}

/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
        struct file_lock *fl, *tmp;
        LIST_HEAD(dispose);

        if (list_empty(&ctx->flc_lease))
                return;

        percpu_down_read_preempt_disable(&file_rwsem);
        spin_lock(&ctx->flc_lock);
        list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
                if (filp == fl->fl_file)
                        lease_modify(fl, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
        percpu_up_read_preempt_enable(&file_rwsem);

        locks_dispose_list(&dispose);
}

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
        struct file_lock_context *ctx;

        ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
        if (!ctx)
                return;

        /* remove any OFD locks */
        locks_remove_posix(filp, filp);

        /* remove flock locks */
        locks_remove_flock(filp, ctx);

        /* remove any leases */
        locks_remove_lease(filp, ctx);

        spin_lock(&ctx->flc_lock);
        locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
        locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
        locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
        spin_unlock(&ctx->flc_lock);
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
        int status = 0;

        spin_lock(&blocked_lock_lock);
        if (waiter->fl_next)
                __locks_delete_block(waiter);
        else
                status = -ENOENT;
        spin_unlock(&blocked_lock_lock);
        return status;
}
EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
        if (filp->f_op->lock && is_remote_lock(filp))
                return filp->f_op->lock(filp, F_CANCELLK, fl);
        return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
        int     li_cpu;
        loff_t  li_pos;
};

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
                            loff_t id, char *pfx)
{
        struct inode *inode = NULL;
        unsigned int fl_pid;
        struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

        fl_pid = locks_translate_pid(fl, proc_pidns);
        /*
         * If there is no fl_pid: when called from locks_show, don't display
         * who is waiting on the lock; when called from __show_fd_info, skip
         * the lock entirely.
         */
        if (fl_pid == 0)
                return;

        if (fl->fl_file != NULL)
                inode = locks_inode(fl->fl_file);

        seq_printf(f, "%lld:%s ", id, pfx);
        if (IS_POSIX(fl)) {
                if (fl->fl_flags & FL_ACCESS)
                        seq_puts(f, "ACCESS");
                else if (IS_OFDLCK(fl))
                        seq_puts(f, "OFDLCK");
                else
                        seq_puts(f, "POSIX ");

                seq_printf(f, " %s ",
                             (inode == NULL) ? "*NOINODE*" :
                             mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
        } else if (IS_FLOCK(fl)) {
                if (fl->fl_type & LOCK_MAND) {
                        seq_puts(f, "FLOCK  MSNFS     ");
                } else {
                        seq_puts(f, "FLOCK  ADVISORY  ");
                }
        } else if (IS_LEASE(fl)) {
                if (fl->fl_flags & FL_DELEG)
                        seq_puts(f, "DELEG  ");
                else
                        seq_puts(f, "LEASE  ");

                if (lease_breaking(fl))
                        seq_puts(f, "BREAKING  ");
                else if (fl->fl_file)
                        seq_puts(f, "ACTIVE    ");
                else
                        seq_puts(f, "BREAKER   ");
        } else {
                seq_puts(f, "UNKNOWN UNKNOWN  ");
        }
        if (fl->fl_type & LOCK_MAND) {
                seq_printf(f, "%s ",
                               (fl->fl_type & LOCK_READ)
                               ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
                               : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
        } else {
                seq_printf(f, "%s ",
                               (lease_breaking(fl))
                               ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
                               : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
        }
        if (inode) {
                /* userspace relies on this representation of dev_t */
                seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
                                MAJOR(inode->i_sb->s_dev),
                                MINOR(inode->i_sb->s_dev), inode->i_ino);
        } else {
                seq_printf(f, "%d <none>:0 ", fl_pid);
        }
        if (IS_POSIX(fl)) {
                if (fl->fl_end == OFFSET_MAX)
                        seq_printf(f, "%Ld EOF\n", fl->fl_start);
                else
                        seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
        } else {
                seq_puts(f, "0 EOF\n");
        }
}
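
/*
 * Illustrative sample (not part of the original source) of the /proc/locks
 * lines the function above produces; the exact values depend on the system,
 * and blocked waiters are shown with the " ->" prefix by locks_show() below:
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 2345 08:01:6789 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 3456 08:01:6789 0 EOF
 */
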
static int locks_show(struct seq_file *f, void *v)
{
        struct locks_iterator *iter = f->private;
        struct file_lock *fl, *bfl;
        struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

        fl = hlist_entry(v, struct file_lock, fl_link);

        if (locks_translate_pid(fl, proc_pidns) == 0)
                return 0;

        lock_get_status(f, fl, iter->li_pos, "");

        list_for_each_entry(bfl, &fl->fl_block, fl_block)
                lock_get_status(f, bfl, iter->li_pos, " ->");

        return 0;
}

static void __show_fd_locks(struct seq_file *f,
                            struct list_head *head, int *id,
                            struct file *filp, struct files_struct *files)
{
        struct file_lock *fl;

        list_for_each_entry(fl, head, fl_list) {

                if (filp != fl->fl_file)
                        continue;
                if (fl->fl_owner != files &&
                    fl->fl_owner != filp)
                        continue;

                (*id)++;
                seq_puts(f, "lock:\t");
                lock_get_status(f, fl, *id, "");
        }
}

void show_fd_locks(struct seq_file *f,
                   struct file *filp, struct files_struct *files)
{
        struct inode *inode = locks_inode(filp);
        struct file_lock_context *ctx;
        int id = 0;

        ctx = smp_load_acquire(&inode->i_flctx);
        if (!ctx)
                return;

        spin_lock(&ctx->flc_lock);
        __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
        __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
        __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
        spin_unlock(&ctx->flc_lock);
}

static void *locks_start(struct seq_file *f, loff_t *pos)
        __acquires(&blocked_lock_lock)
{
        struct locks_iterator *iter = f->private;

        iter->li_pos = *pos + 1;
        percpu_down_write(&file_rwsem);
        spin_lock(&blocked_lock_lock);
        return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
        struct locks_iterator *iter = f->private;

        ++iter->li_pos;
        return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
        __releases(&blocked_lock_lock)
{
        spin_unlock(&blocked_lock_lock);
        percpu_up_write(&file_rwsem);
}

static const struct seq_operations locks_seq_operations = {
        .start  = locks_start,
        .next   = locks_next,
        .stop   = locks_stop,
        .show   = locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
        return seq_open_private(filp, &locks_seq_operations,
                                sizeof(struct locks_iterator));
}

static const struct file_operations proc_locks_operations = {
        .open           = locks_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

static int __init proc_locks_init(void)
{
        proc_create("locks", 0, NULL, &proc_locks_operations);
        return 0;
}
fs_initcall(proc_locks_init);
#endif

static int __init filelock_init(void)
{
        int i;

        flctx_cache = kmem_cache_create("file_lock_ctx",
                        sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

        filelock_cache = kmem_cache_create("file_lock_cache",
                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

        for_each_possible_cpu(i) {
                struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

                spin_lock_init(&fll->lock);
                INIT_HLIST_HEAD(&fll->hlist);
        }

        return 0;
}
core_initcall(filelock_init);