/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/filesystems/mandatory-locking.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
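
/*
 * Illustrative userspace sketch (not part of the original file): the two
 * lock personalities above can be exercised roughly as follows, assuming
 * an existing file "testfile". flock() locks attach to the open file
 * description, while classic fcntl() locks belong to the process:
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int demo(void)
 *	{
 *		struct flock pl = {
 *			.l_type = F_WRLCK,	// byte-range write lock
 *			.l_whence = SEEK_SET,
 *			.l_start = 0,
 *			.l_len = 0,		// 0 means "through EOF"
 *		};
 *		int fd = open("testfile", O_RDWR);
 *
 *		flock(fd, LOCK_EX);		// FL_FLOCK: whole file, tied to the fd
 *		fcntl(fd, F_SETLK, &pl);	// FL_POSIX: byte range, tied to the process
 *		close(fd);			// drops ALL of this process's POSIX locks
 *		return 0;
 *	}
 */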
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the flc_lock and the blocked_lock_lock (acquired in that order).
 * Deleting an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
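
/*
 * The allocation above is a common lock-free lazy-init pattern; a minimal
 * sketch of the same idea, with hypothetical names, looks like:
 *
 *	struct foo *get_foo(struct bar *b)
 *	{
 *		struct foo *f = smp_load_acquire(&b->foo); // pairs with cmpxchg()
 *
 *		if (f)
 *			return f;
 *		f = alloc_foo();
 *		if (f && cmpxchg(&b->foo, NULL, f) != NULL) {
 *			free_foo(f);			// lost the race
 *			f = smp_load_acquire(&b->foo);	// take the winner's copy
 *		}
 *		return f;
 *	}
 */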

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type,
			fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;
	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
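
/*
 * Worked example of the arithmetic above: with l_whence = SEEK_SET,
 * l_start = 100 and l_len = -10, fl_start is first set to 0 + 100 = 100;
 * the negative length then gives fl_end = 100 - 1 = 99 and
 * fl_start = 100 + (-10) = 90, i.e. the lock covers bytes [90, 99].
 * With l_len = 0 the range instead extends to OFFSET_MAX ("to end of file").
 */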

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}
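
/*
 * Userspace sketch (illustrative only) of taking a lease built with the
 * helpers above. The holder is signalled (SIGIO by default, via the fasync
 * hook installed in lease_setup()) when another open() breaks the lease:
 *
 *	#define _GNU_SOURCE		// for F_SETLEASE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	static int lease_fd;
 *
 *	static void on_lease_break(int sig)
 *	{
 *		fcntl(lease_fd, F_SETLEASE, F_UNLCK);	// give the lease back
 *	}
 *
 *	int take_read_lease(int fd)
 *	{
 *		lease_fd = fd;
 *		signal(SIGIO, on_lease_break);
 *		return fcntl(fd, F_SETLEASE, F_RDLCK);
 *	}
 */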

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
 * that the flc_lock is also held on insertions we can avoid taking the
 * blocked_lock_lock in some cases when we see that the fl_block list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the fl_block
	 * list does not require the flc_lock, so we must recheck list_empty()
	 * after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
					  struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	fl->fl_nspid = get_pid(task_tgid(current));
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}
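
/*
 * For overlapping locks with distinct owners, the test above reduces to
 * this conflict matrix:
 *
 *			sys_fl
 *	caller_fl	F_RDLCK	F_WRLCK
 *	F_RDLCK		no	yes
 *	F_WRLCK		yes	yes
 */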

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			if (cfl->fl_nspid)
				fl->fl_pid = pid_vnr(cfl->fl_nspid);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turns may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
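
/*
 * Sketch of the classic cycle this walk catches: process A holds a lock on
 * range X and blocks waiting for range Y; process B holds Y and now requests
 * X with F_SETLKW. Starting from B's request, what_owner_is_waiting_for()
 * leads from A's lock on X to A's queued waiter on Y, whose owner matches
 * the caller, so B's request fails with -EDEADLK instead of sleeping forever:
 *
 *	process A			process B
 *	---------			---------
 *	lock X    (granted)		lock Y    (granted)
 *	lock Y    (blocks on B)
 *					lock X    -> EDEADLK
 */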

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}
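
/*
 * Note the non-atomic upgrade path above: a second flock() call on an fd
 * that already holds a lock of the other type first deletes the old lock
 * (waking any waiters) and only then queues the new request, e.g.
 * (userspace sketch):
 *
 *	flock(fd, LOCK_SH);	// granted
 *	flock(fd, LOCK_EX);	// shared lock dropped first; may block here,
 *				// and another process can grab the lock
 *				// in the window
 */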

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);
	return error;
}
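
/*
 * Worked example of the split path above: if this owner holds F_WRLCK on
 * [0, 199] and requests F_UNLCK on [50, 99], the old lock becomes the
 * "left" piece and new_fl2 the "right" piece:
 *
 *	before:	[0 ....................... 199]	F_WRLCK
 *	unlock:	         [50 ... 99]
 *	after:	[0 .. 49]            [100 .. 199]	two F_WRLCK locks
 *
 * This is also why the function can fail with -ENOLCK: splitting one lock
 * into two needs the second preallocated structure (new_fl2).
 */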

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
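
/*
 * All blocking waiters use this same retry pattern: (re)apply the request,
 * and if it returns FILE_LOCK_DEFERRED it has been queued on the blocker's
 * fl_block list; sleep until fl_next is cleared by locks_wake_up_blocks(),
 * then try again, unlinking ourselves from the block list if interrupted
 * by a signal.
 */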

#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = file_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @start: first byte in the file to check
 * @end: last byte in the file to check
 * @type: %F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */
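
/*
 * Mandatory locking context (illustrative): the checks above only apply on
 * filesystems mounted with "-o mand", to files whose setgid bit is set
 * while the group-execute bit is clear. A userspace sketch of marking such
 * a file, assuming a suitable mount:
 *
 *	#include <sys/stat.h>
 *
 *	int make_locking_mandatory(const char *path)
 *	{
 *		// setgid without group-execute selects mandatory locking
 *		return chmod(path, S_ISGID | 0644);
 *	}
 *
 * POSIX locks on such a file are then enforced against read()/write()
 * (and checked at mmap() time) rather than being merely advisory.
 */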
  1198. static void lease_clear_pending(struct file_lock *fl, int arg)
  1199. {
  1200. switch (arg) {
  1201. case F_UNLCK:
  1202. fl->fl_flags &= ~FL_UNLOCK_PENDING;
  1203. /* fall through: */
  1204. case F_RDLCK:
  1205. fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
  1206. }
  1207. }
  1208. /* We already had a lease on this file; just change its type */
  1209. int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
  1210. {
  1211. int error = assign_type(fl, arg);
  1212. if (error)
  1213. return error;
  1214. lease_clear_pending(fl, arg);
  1215. locks_wake_up_blocks(fl);
  1216. if (arg == F_UNLCK) {
  1217. struct file *filp = fl->fl_file;
  1218. f_delown(filp);
  1219. filp->f_owner.signum = 0;
  1220. fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
  1221. if (fl->fl_fasync != NULL) {
  1222. printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
  1223. fl->fl_fasync = NULL;
  1224. }
  1225. locks_delete_lock_ctx(fl, dispose);
  1226. }
  1227. return 0;
  1228. }
  1229. EXPORT_SYMBOL(lease_modify);
  1230. static bool past_time(unsigned long then)
  1231. {
  1232. if (!then)
  1233. /* 0 is a special value meaning "this never expires": */
  1234. return false;
  1235. return time_after(jiffies, then);
  1236. }
  1237. static void time_out_leases(struct inode *inode, struct list_head *dispose)
  1238. {
  1239. struct file_lock_context *ctx = inode->i_flctx;
  1240. struct file_lock *fl, *tmp;
  1241. lockdep_assert_held(&ctx->flc_lock);
  1242. list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
  1243. trace_time_out_leases(inode, fl);
  1244. if (past_time(fl->fl_downgrade_time))
  1245. lease_modify(fl, F_RDLCK, dispose);
  1246. if (past_time(fl->fl_break_time))
  1247. lease_modify(fl, F_UNLCK, dispose);
  1248. }
  1249. }
  1250. static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
  1251. {
  1252. if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
  1253. return false;
  1254. if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
  1255. return false;
  1256. return locks_conflict(breaker, lease);
  1257. }
  1258. static bool
  1259. any_leases_conflict(struct inode *inode, struct file_lock *breaker)
  1260. {
  1261. struct file_lock_context *ctx = inode->i_flctx;
  1262. struct file_lock *fl;
  1263. lockdep_assert_held(&ctx->flc_lock);
  1264. list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
  1265. if (leases_conflict(fl, breaker))
  1266. return true;
  1267. }
  1268. return false;
  1269. }
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file on which leases are to be broken
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file.  Leases are broken on
 * a call to open() or truncate().  This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_next, break_time);

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
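
/*
 * Illustrative sketch (not part of this file): how an open() path drives
 * lease breaking.  break_lease() is the inline wrapper from <linux/fs.h>
 * that calls __break_lease() only when i_flctx is populated.
 *
 *	error = break_lease(inode, filp->f_flags);
 *	if (error)
 *		return error;	// -EWOULDBLOCK if opened with O_NONBLOCK
 */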
/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 * changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		locks_dispose_list(&dispose);
	}
	return type;
}
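
/*
 * Illustrative userspace usage (sketch): querying the lease held on an
 * open descriptor.
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *	// type is F_RDLCK, F_WRLCK, or F_UNLCK, with the pending-break
 *	// semantics described above
 */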
/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry: dentry to check
 * @arg: type of lease that we're trying to acquire
 * @flags: current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if (flags & FL_LAYOUT)
		return 0;

	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		return -EAGAIN;

	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but not getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless.  It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted.  Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}
/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 * @priv: private data for lm_setup (may be NULL if lm_setup
 *	doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
{
	struct inode *inode = file_inode(filp);
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}
		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use when adding a lease
 * @priv: private info for lm_setup when adding a lease (may be
 *	NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file.  The "lease" argument is not
 * used for F_UNLCK requests and may be NULL.  For commands that set or alter
 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
 * if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is.  It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
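
/*
 * Illustrative sketch (assumed names, not from this file): a filesystem
 * that cannot support leases can veto them by supplying its own ->setlease
 * hook; leaving the hook NULL selects generic_setlease() above.
 *
 *	static int examplefs_setlease(struct file *filp, long arg,
 *				      struct file_lock **flp, void **priv)
 *	{
 *		return -EINVAL;	// leases unsupported on examplefs
 *	}
 *
 *	const struct file_operations examplefs_file_ops = {
 *		.setlease	= examplefs_setlease,
 *		// ... other operations ...
 *	};
 */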
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
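
/*
 * Illustrative userspace usage (sketch): taking a read lease and choosing
 * the signal delivered when another opener forces a lease break.
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// signal to raise on break
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	// ... on signal, downgrade or release within lease_break_time ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */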
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;

	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int res = 0;

	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_inode_wait(inode, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_inode_wait(inode, fl);
		break;
	default:
		BUG();
	}
	return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);
/**
 * sys_flock - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a 'mandatory' flock.  This exists to emulate Windows
 * Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					    (can_sleep) ? F_SETLKW : F_SETLK,
					    lock);
	else
		error = locks_lock_file_wait(f.file, lock);

out_free:
	locks_free_lock(lock);

out_putf:
	fdput(f);
out:
	return error;
}
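
/*
 * Illustrative userspace usage (sketch):
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		// EWOULDBLOCK: a conflicting flock lock is already held
 *	}
 *	// ... critical section ...
 *	flock(fd, LOCK_UN);
 */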
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK;
		file_lock.fl_flags |= FL_OFDLCK;
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto rel_priv;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
rel_priv:
	locks_release_private(&file_lock);
out:
	return error;
}
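
/*
 * Illustrative userspace usage (sketch): probing for a conflicting lock
 * without taking one.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,	// zero length means "to EOF"
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type != F_UNLCK)
 *		// fl now describes the first conflicting lock
 *		// (owner in l_pid, range in l_start/l_len)
 */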
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set.  Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock.  When ->lock() does return
 * asynchronously, it must return FILE_LOCK_DEFERRED, and call ->lm_grant()
 * when the lock request completes.
 *
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result.  If the request timed out, the callback routine will return
 * a nonzero return code and the filesystem should release the lock.  The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
 *
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
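
/*
 * Illustrative sketch (assumed names, not from this file) of the deferred
 * ->lock() contract described above, seen from the filesystem's side.
 * queue_async_lock_request() is hypothetical.
 *
 *	static int examplefs_lock(struct file *filp, int cmd,
 *				  struct file_lock *fl)
 *	{
 *		if (IS_SETLK(cmd)) {
 *			queue_async_lock_request(filp, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		// F_GETLK etc. handled synchronously
 *	}
 *
 *	// later, when the asynchronous request completes:
 *	fl->fl_lmops->lm_grant(fl, result);
 */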
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	inode = file_inode(filp);

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired.  There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close().  rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
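
/*
 * Illustrative userspace usage (sketch): an open-file-description (OFD)
 * lock.  The F_OFD_* commands require l_pid == 0; ownership follows the
 * open file description rather than the process.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,
 *		.l_pid = 0,	// mandatory for F_OFD_* commands
 *	};
 *	fcntl(fd, F_OFD_SETLK, &fl);
 */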
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		file_lock.fl_flags |= FL_OFDLCK;
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

	locks_release_private(&file_lock);
out:
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = file_inode(filp);

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		/* Fallthrough */
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired.  There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close().  rcu_read_lock() wouldn't do.
		 */
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(file_inode(filp), &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl = {
		.fl_owner = filp,
		.fl_pid = current->tgid,
		.fl_file = filp,
		.fl_flags = FL_FLOCK,
		.fl_type = F_UNLCK,
		.fl_end = OFFSET_MAX,
	};
	struct inode *inode = file_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	locks_dispose_list(&dispose);
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&file_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);
}
/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = 0;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid) {
		struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

		/* Don't let fl_pid change based on who is reading the file */
		fl_pid = pid_nr_ns(fl->fl_nspid, proc_pidns);
		/*
		 * If the lock's owner has no pid in the reader's namespace,
		 * don't display who is waiting on the lock when called from
		 * locks_show(); when called via __show_fd_locks(), skip the
		 * lock entirely.
		 */
		if (fl_pid == 0)
			return;
	} else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = file_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			   (inode == NULL) ? "*NOINODE*" :
			   mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			   (fl->fl_type & LOCK_READ)
			   ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			   : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			   (lease_breaking(fl))
			   ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			   : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
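
/*
 * Example of the resulting /proc/locks output (illustrative values):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:98765 0 EOF
 *	3: LEASE  ACTIVE    READ  4321 08:01:11111 0 EOF
 */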
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl = hlist_entry(v, struct file_lock, fl_link);

	if (fl->fl_nspid && !pid_nr_ns(fl->fl_nspid, proc_pidns))
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}

static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "");
	}
}

void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
fs_initcall(proc_locks_init);
#endif
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	return 0;
}
core_initcall(filelock_init);