
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();

	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
	struct hlist_head *head;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might get lost if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init(&tmp->cache_list);
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
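
/*
 * Usage sketch (illustrative only; 'struct demo_entry', 'demo_cache' and
 * 'demo_hash' are hypothetical, not part of this file).  A cache embeds a
 * cache_head in its own entry type and supplies alloc/match/init/update
 * through its cache_detail:
 *
 *	struct demo_entry {
 *		struct cache_head h;
 *		int key, value;
 *	};
 *
 *	struct demo_entry key = { .key = 42 };
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup(&demo_cache, &key.h, demo_hash(42));
 *	if (ch) {
 *		struct demo_entry *e = container_of(ch, struct demo_entry, h);
 *		...
 *		cache_put(ch, &demo_cache);	// drop the reference from lookup
 *	}
 */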

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
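
/*
 * Update sketch (illustrative; same hypothetical 'demo' names as above).
 * A downcall parser typically fills a temporary entry and lets
 * sunrpc_cache_update() either refresh the existing entry in place or
 * atomically replace it:
 *
 *	struct demo_entry tmp = { .value = new_value };
 *	tmp.h.expiry_time = expiry;
 *	ch = sunrpc_cache_update(&demo_cache, &tmp.h, &old->h, hash);
 *	// on success 'ch' holds a reference to the live entry; the
 *	// reference that was held on 'old' has been consumed.
 */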

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *            upcall completed but item is still invalid (implying that
 *            the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
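
/*
 * Caller sketch (illustrative): a server thread that can defer requests
 * typically treats the return values like this (SVC_DROP/SVC_DENIED are
 * the svcauth verdict codes; 'demo_cache' and 'e' are hypothetical):
 *
 *	switch (cache_check(&demo_cache, &e->h, &rqstp->rq_chandle)) {
 *	case 0:		// entry valid, reference still held
 *		break;
 *	case -EAGAIN:	// request was deferred and will be revisited
 *		return SVC_DROP;
 *	case -ENOENT:	// negative entry
 *		return SVC_DENIED;
 *	default:	// -ETIMEDOUT etc.
 *		return SVC_DROP;
 *	}
 */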

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (detail->flush_time >= now)
		now = detail->flush_time + 1;
	/* 'now' is the maximum value any 'last_refresh' can have */
	detail->flush_time = now;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
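
/*
 * Illustrative sketch (not part of this file; the 'demo_*' names are
 * hypothetical): a request that supports deferral implements ->defer on
 * its cache_req, returning a cache_deferred_req whose ->revisit handler
 * re-queues or drops the saved request when the cache entry fills in:
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct demo_deferred *dd =
 *			container_of(dreq, struct demo_deferred, handle);
 *		if (too_many)
 *			demo_drop(dd);		// hypothetical helper
 *		else
 *			demo_requeue(dd);	// hypothetical helper
 *	}
 *
 * The sunrpc server implements this pattern in svc_defer()/svc_revisit()
 * (net/sunrpc/svc_xprt.c).
 */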

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
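
/*
 * Note: each cache_detail has a single 'queue' list holding both
 * cache_request entries (upcalls waiting to be read) and cache_reader
 * entries (one per open reader, marking how far that reader has got).
 * A reader walks forward past other readers to find the next request;
 * 'offset' records progress within a partially-read request.
 */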

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
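
/*
 * Example (illustrative): qword_add() escapes space, tab, newline and
 * backslash as three-digit octal and appends a space separator, so the
 * string "my host" is emitted as "my\040host " and *lp shrinks by the
 * bytes written.  On overflow, *lp becomes -1 and later calls are no-ops.
 */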

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
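
/*
 * Example (illustrative): qword_addhex() emits a leading "\x", two
 * lowercase hex digits per byte, then a space, so the two bytes
 * {0xde, 0xad} become "\xdead ".  As with qword_add(), *lp becomes -1
 * if the buffer is too small.
 */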

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags))
		list_add_tail(&crq->q.list, &detail->queue);
	else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
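
/*
 * Example exchange on a channel file (illustrative; the exact field
 * layout is defined by each cache's ->cache_request and ->cache_parse).
 * For an address-mapping cache the daemon might read the upcall
 *
 *	nfsd 10.0.0.1
 *
 * and write back a reply carrying an expiry time and the resolved value:
 *
 *	nfsd 10.0.0.1 1500000000 localhost
 */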

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
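
/*
 * Examples (illustrative): given the input "demo\040name next",
 * qword_get() copies "demo name" into dest and advances *bpp to "next";
 * given "\x68656c6c6f ", it decodes the hex string to "hello".
 */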

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
void *cache_seq_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);
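
/*
 * The iterator packs its position into the 64-bit *pos: the upper 32
 * bits hold the hash bucket index and the lower 32 bits hold the entry
 * index within that bucket (offset by one so that *pos == 0 means
 * "emit the header").  cache_seq_next() below advances within a bucket
 * and then steps to the next non-empty bucket.
 */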

void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(ch->cache_list.next,
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);

void cache_seq_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = m->private;
	read_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_seq_stop);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start,
	.next	= cache_seq_next,
	.stop	= cache_seq_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	unsigned long p = *ppos;
	size_t len;

	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;
	time_t then, now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	then = get_expiry(&bp);
	now = seconds_since_boot();
	cd->nextcheck = now;
	/* Can only set flush_time to 1 second beyond "now", or
	 * possibly 1 second beyond flushtime.  This is because
	 * flush_time never goes backwards so it mustn't get too far
	 * ahead of time.
	 */
	if (then >= now) {
		/* Want to flush everything, so behave like cache_purge() */
		if (cd->flush_time >= now)
			now = cd->flush_time + 1;
		then = now;
	}

	cd->flush_time = then;
	cache_flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
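
/*
 * Per-net registration sketch (illustrative; 'demo_cache_template' is
 * hypothetical).  A subsystem typically clones a template for each
 * network namespace and registers it, then tears both down on exit:
 *
 *	struct cache_detail *cd = cache_create_net(&demo_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */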

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->u.pipefs.dir = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);