/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);
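
/*
 * Note: with mode 0644 this limit is also tunable at run time; since this
 * file is built into the sunrpc module, it would normally appear as
 * /sys/module/sunrpc/parameters/svc_rpc_per_connection_limit.
 * A value of 0 (the default) means no per-connection limit.
 */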

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
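
/*
 * For example (an illustrative sketch, not code from any particular
 * provider; my_xprt_data_ready is a hypothetical name), a transport's
 * data-ready callback follows the XPT_DATA rules above roughly like so:
 *
 *	static void my_xprt_data_ready(struct svc_xprt *xprt)
 *	{
 *		set_bit(XPT_DATA, &xprt->xpt_flags);
 *		svc_xprt_enqueue(xprt);
 *	}
 */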

int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
        struct svc_xprt_class *cl;
        int res = -EEXIST;

        dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

        INIT_LIST_HEAD(&xcl->xcl_list);
        spin_lock(&svc_xprt_class_lock);
        /* Make sure there isn't already a class with the same name */
        list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
                if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
                        goto out;
        }
        list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
        res = 0;
out:
        spin_unlock(&svc_xprt_class_lock);
        return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
        dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
        spin_lock(&svc_xprt_class_lock);
        list_del_init(&xcl->xcl_list);
        spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
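
/*
 * An illustrative sketch of how a provider module might use the pair of
 * calls above from its init/exit hooks (all "my_*" names are hypothetical):
 *
 *	static struct svc_xprt_class my_xprt_class = {
 *		.xcl_name	 = "my-xprt",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &my_xprt_ops,
 *		.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
 *	};
 *
 *	static int __init my_xprt_mod_init(void)
 *	{
 *		return svc_reg_xprt_class(&my_xprt_class);
 *	}
 *
 *	static void __exit my_xprt_mod_exit(void)
 *	{
 *		svc_unreg_xprt_class(&my_xprt_class);
 *	}
 */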

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
        struct svc_xprt_class *xcl;
        char tmpstr[80];
        int len = 0;

        buf[0] = '\0';

        spin_lock(&svc_xprt_class_lock);
        list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
                int slen;

                sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
                slen = strlen(tmpstr);
                if (len + slen > maxlen)
                        break;
                len += slen;
                strcat(buf, tmpstr);
        }
        spin_unlock(&svc_xprt_class_lock);

        return len;
}

static void svc_xprt_free(struct kref *kref)
{
        struct svc_xprt *xprt =
                container_of(kref, struct svc_xprt, xpt_ref);
        struct module *owner = xprt->xpt_class->xcl_owner;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
                svcauth_unix_info_release(xprt);
        put_net(xprt->xpt_net);
        /* See comment on corresponding get in xs_setup_bc_tcp(): */
        if (xprt->xpt_bc_xprt)
                xprt_put(xprt->xpt_bc_xprt);
        if (xprt->xpt_bc_xps)
                xprt_switch_put(xprt->xpt_bc_xps);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
        kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
                   struct svc_xprt *xprt, struct svc_serv *serv)
{
        memset(xprt, 0, sizeof(*xprt));
        xprt->xpt_class = xcl;
        xprt->xpt_ops = xcl->xcl_ops;
        kref_init(&xprt->xpt_ref);
        xprt->xpt_server = serv;
        INIT_LIST_HEAD(&xprt->xpt_list);
        INIT_LIST_HEAD(&xprt->xpt_ready);
        INIT_LIST_HEAD(&xprt->xpt_deferred);
        INIT_LIST_HEAD(&xprt->xpt_users);
        mutex_init(&xprt->xpt_mutex);
        spin_lock_init(&xprt->xpt_lock);
        set_bit(XPT_BUSY, &xprt->xpt_flags);
        rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
        xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
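
/*
 * A minimal sketch of the provider side: an xpo_create method would
 * typically embed a struct svc_xprt in its own per-transport structure
 * and hand it to svc_xprt_init() before doing protocol-specific setup
 * ("my_xprt" and "my_xprt_class" are hypothetical):
 *
 *	static struct svc_xprt *my_xpo_create(struct svc_serv *serv,
 *					      struct net *net,
 *					      struct sockaddr *sa, int salen,
 *					      int flags)
 *	{
 *		struct my_xprt *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (!p)
 *			return ERR_PTR(-ENOMEM);
 *		svc_xprt_init(net, &my_xprt_class, &p->xprt, serv);
 *		... bind/listen on sa here ...
 *		return &p->xprt;
 *	}
 *
 * Note that svc_xprt_init() leaves XPT_BUSY set; the new transport is not
 * processed until svc_xprt_received() clears that bit.
 */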

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                                         struct svc_serv *serv,
                                         struct net *net,
                                         const int family,
                                         const unsigned short port,
                                         int flags)
{
        struct sockaddr_in sin = {
                .sin_family		= AF_INET,
                .sin_addr.s_addr	= htonl(INADDR_ANY),
                .sin_port		= htons(port),
        };
#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 sin6 = {
                .sin6_family		= AF_INET6,
                .sin6_addr		= IN6ADDR_ANY_INIT,
                .sin6_port		= htons(port),
        };
#endif
        struct sockaddr *sap;
        size_t len;

        switch (family) {
        case PF_INET:
                sap = (struct sockaddr *)&sin;
                len = sizeof(sin);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                sap = (struct sockaddr *)&sin6;
                len = sizeof(sin6);
                break;
#endif
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }

        return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
        if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
                WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
                return;
        }

        /* As soon as we clear busy, the xprt could be closed and
         * 'put', so we need a reference to call svc_enqueue_xprt with:
         */
        svc_xprt_get(xprt);
        smp_mb__before_atomic();
        clear_bit(XPT_BUSY, &xprt->xpt_flags);
        xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
        svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
        clear_bit(XPT_TEMP, &new->xpt_flags);
        spin_lock_bh(&serv->sv_lock);
        list_add(&new->xpt_list, &serv->sv_permsocks);
        spin_unlock_bh(&serv->sv_lock);
        svc_xprt_received(new);
}

static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
                            struct net *net, const int family,
                            const unsigned short port, int flags)
{
        struct svc_xprt_class *xcl;

        spin_lock(&svc_xprt_class_lock);
        list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
                struct svc_xprt *newxprt;
                unsigned short newport;

                if (strcmp(xprt_name, xcl->xcl_name))
                        continue;

                if (!try_module_get(xcl->xcl_owner))
                        goto err;

                spin_unlock(&svc_xprt_class_lock);
                newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
                if (IS_ERR(newxprt)) {
                        module_put(xcl->xcl_owner);
                        return PTR_ERR(newxprt);
                }
                svc_add_new_perm_xprt(serv, newxprt);
                newport = svc_xprt_local_port(newxprt);
                return newport;
        }
err:
        spin_unlock(&svc_xprt_class_lock);
        /* This errno is exposed to user space.  Provide a reasonable
         * perror msg for a bad transport. */
        return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
                    struct net *net, const int family,
                    const unsigned short port, int flags)
{
        int err;

        dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
        err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
        if (err == -EPROTONOSUPPORT) {
                request_module("svc%s", xprt_name);
                err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
        }
        if (err)
                dprintk("svc: transport %s not found, err %d\n",
                        xprt_name, err);
        return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
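
/*
 * Illustrative caller-side sketch: a service that wants a permanent TCP
 * listener on port 2049 could do something like
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, 2049,
 *			      SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		goto out_err;
 *
 * On success the return value is the bound local port, which matters when
 * port 0 ("pick any free port") was requested.
 */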

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
        rqstp->rq_addrlen = xprt->xpt_remotelen;

        /*
         * Destination address in request is needed for binding the
         * source address in RPC replies/callbacks later.
         */
        memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
        rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
        return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
        unsigned int limit = svc_rpc_per_connection_limit;
        int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

        return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
                if (!svc_xprt_slots_in_range(xprt))
                        return false;
                atomic_inc(&xprt->xpt_nr_rqsts);
                set_bit(RQ_DATA, &rqstp->rq_flags);
        }
        return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
                atomic_dec(&xprt->xpt_nr_rqsts);
                svc_xprt_enqueue(xprt);
        }
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
        if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
                return true;
        if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
                if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
                    svc_xprt_slots_in_range(xprt))
                        return true;
                trace_svc_xprt_no_write_space(xprt);
                return false;
        }
        return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
        struct svc_pool *pool;
        struct svc_rqst *rqstp = NULL;
        int cpu;
        bool queued = false;

        if (!svc_xprt_has_something_to_do(xprt))
                goto out;

        /* Mark transport as busy. It will remain in this state until
         * the provider calls svc_xprt_received. We update XPT_BUSY
         * atomically because it also guards against trying to enqueue
         * the transport twice.
         */
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
                /* Don't enqueue transport while already enqueued */
                dprintk("svc: transport %p busy, not enqueued\n", xprt);
                goto out;
        }

        cpu = get_cpu();
        pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

        atomic_long_inc(&pool->sp_stats.packets);

redo_search:
        /* find a thread for this xprt */
        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                /* Do a lockless check first */
                if (test_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;

                /*
                 * Once the xprt has been queued, it can only be dequeued by
                 * the task that intends to service it. All we can do at that
                 * point is to try to wake this thread back up so that it can
                 * do so.
                 */
                if (!queued) {
                        spin_lock_bh(&rqstp->rq_lock);
                        if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
                                /* already busy, move on... */
                                spin_unlock_bh(&rqstp->rq_lock);
                                continue;
                        }

                        /* this one will do */
                        rqstp->rq_xprt = xprt;
                        svc_xprt_get(xprt);
                        spin_unlock_bh(&rqstp->rq_lock);
                }
                rcu_read_unlock();

                atomic_long_inc(&pool->sp_stats.threads_woken);
                wake_up_process(rqstp->rq_task);
                put_cpu();
                goto out;
        }
        rcu_read_unlock();

        /*
         * We didn't find an idle thread to use, so we need to queue the xprt.
         * Do so and then search again. If we find one, we can't hook this one
         * up to it directly but we can wake the thread up in the hopes that it
         * will pick it up once it searches for a xprt to service.
         */
        if (!queued) {
                queued = true;
                dprintk("svc: transport %p put into queue\n", xprt);
                spin_lock_bh(&pool->sp_lock);
                list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
                pool->sp_stats.sockets_queued++;
                spin_unlock_bh(&pool->sp_lock);
                goto redo_search;
        }
        rqstp = NULL;
        put_cpu();
out:
        trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
        if (test_bit(XPT_BUSY, &xprt->xpt_flags))
                return;
        xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
        struct svc_xprt *xprt = NULL;

        if (list_empty(&pool->sp_sockets))
                goto out;

        spin_lock_bh(&pool->sp_lock);
        if (likely(!list_empty(&pool->sp_sockets))) {
                xprt = list_first_entry(&pool->sp_sockets,
                                        struct svc_xprt, xpt_ready);
                list_del_init(&xprt->xpt_ready);
                svc_xprt_get(xprt);

                dprintk("svc: transport %p dequeued, inuse=%d\n",
                        xprt, kref_read(&xprt->xpt_ref));
        }
        spin_unlock_bh(&pool->sp_lock);
out:
        trace_svc_xprt_dequeue(xprt);
        return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_xprt *xprt = rqstp->rq_xprt;
                atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
                rqstp->rq_reserved = space;

                svc_xprt_enqueue(xprt);
        }
}
EXPORT_SYMBOL_GPL(svc_reserve);
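
/*
 * A minimal usage sketch: a service routine that knows its reply will never
 * exceed one page can release most of its reservation early, which may let
 * a write-blocked transport make progress:
 *
 *	svc_reserve(rqstp, PAGE_SIZE);
 *
 * (Compare svc_reserve_auth(), which wraps this call to leave room for an
 * authentication verifier.)
 */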

static void svc_xprt_release(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

        kfree(rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;

        svc_free_res_pages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;

        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        svc_xprt_release_slot(rqstp);
        rqstp->rq_xprt = NULL;
        svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when a xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;
        struct svc_pool *pool;

        pool = &serv->sv_pools[0];

        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                /* skip any that aren't queued */
                if (test_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;
                rcu_read_unlock();
                dprintk("svc: daemon %p woken up.\n", rqstp);
                wake_up_process(rqstp->rq_task);
                trace_svc_wake_up(rqstp->rq_task->pid);
                return;
        }
        rcu_read_unlock();

        /* No free entries available */
        set_bit(SP_TASK_PENDING, &pool->sp_flags);
        smp_wmb();
        trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
        switch (sin->sa_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)sin)->sin_port)
                        < PROT_SOCK;
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
                        < PROT_SOCK;
        default:
                return 0;
        }
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
        unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
                                (serv->sv_nrthreads+3) * 20;

        if (serv->sv_tmpcnt > limit) {
                struct svc_xprt *xprt = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        /* Try to help the admin */
                        net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
                                               serv->sv_name, serv->sv_maxconn ?
                                               "max number of connections" :
                                               "number of threads");
                        /*
                         * Always select the oldest connection. It's not fair,
                         * but so is life
                         */
                        xprt = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_xprt,
                                          xpt_list);
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_get(xprt);
                }
                spin_unlock_bh(&serv->sv_lock);

                if (xprt) {
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
        }
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct xdr_buf *arg;
        int pages;
        int i;

        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
        if (pages > RPCSVC_MAXPAGES) {
                pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
                             pages, RPCSVC_MAXPAGES);
                /* use as many pages as possible */
                pages = RPCSVC_MAXPAGES;
        }
        for (i = 0; i < pages ; i++)
                while (rqstp->rq_pages[i] == NULL) {
                        struct page *p = alloc_page(GFP_KERNEL);
                        if (!p) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (signalled() || kthread_should_stop()) {
                                        set_current_state(TASK_RUNNING);
                                        return -EINTR;
                                }
                                schedule_timeout(msecs_to_jiffies(500));
                        }
                        rqstp->rq_pages[i] = p;
                }
        rqstp->rq_page_end = &rqstp->rq_pages[i];
        rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        arg->pages = rqstp->rq_pages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;
        return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
        struct svc_pool *pool = rqstp->rq_pool;

        /* did someone call svc_wake_up? */
        if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
                return false;

        /* was a socket queued? */
        if (!list_empty(&pool->sp_sockets))
                return false;

        /* are we shutting down? */
        if (signalled() || kthread_should_stop())
                return false;

        /* are we freezing? */
        if (freezing(current))
                return false;

        return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
        struct svc_xprt *xprt;
        struct svc_pool *pool = rqstp->rq_pool;
        long time_left = 0;

        /* rq_xprt should be clear on entry */
        WARN_ON_ONCE(rqstp->rq_xprt);

        /* Normally we will wait up to 5 seconds for any required
         * cache information to be provided.
         */
        rqstp->rq_chandle.thread_wait = 5*HZ;

        xprt = svc_xprt_dequeue(pool);
        if (xprt) {
                rqstp->rq_xprt = xprt;

                /* As there is a shortage of threads and this request
                 * had to be queued, don't allow the thread to wait so
                 * long for cache updates.
                 */
                rqstp->rq_chandle.thread_wait = 1*HZ;
                clear_bit(SP_TASK_PENDING, &pool->sp_flags);
                return xprt;
        }

        /*
         * We have to be able to interrupt this wait
         * to bring down the daemons ...
         */
        set_current_state(TASK_INTERRUPTIBLE);
        clear_bit(RQ_BUSY, &rqstp->rq_flags);
        smp_mb();

        if (likely(rqst_should_sleep(rqstp)))
                time_left = schedule_timeout(timeout);
        else
                __set_current_state(TASK_RUNNING);

        try_to_freeze();

        spin_lock_bh(&rqstp->rq_lock);
        set_bit(RQ_BUSY, &rqstp->rq_flags);
        spin_unlock_bh(&rqstp->rq_lock);

        xprt = rqstp->rq_xprt;
        if (xprt != NULL)
                return xprt;

        if (!time_left)
                atomic_long_inc(&pool->sp_stats.threads_timedout);

        if (signalled() || kthread_should_stop())
                return ERR_PTR(-EINTR);
        return ERR_PTR(-EAGAIN);
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
        spin_lock_bh(&serv->sv_lock);
        set_bit(XPT_TEMP, &newxpt->xpt_flags);
        list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
        serv->sv_tmpcnt++;
        if (serv->sv_temptimer.function == NULL) {
                /* setup timer to age temp transports */
                setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
                            (unsigned long)serv);
                mod_timer(&serv->sv_temptimer,
                          jiffies + svc_conn_age_period * HZ);
        }
        spin_unlock_bh(&serv->sv_lock);
        svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
        struct svc_serv *serv = rqstp->rq_server;
        int len = 0;

        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                dprintk("svc_recv: found XPT_CLOSE\n");
                if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
                        xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
                svc_delete_xprt(xprt);
                /* Leave XPT_BUSY set on the dead xprt: */
                goto out;
        }
        if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
                struct svc_xprt *newxpt;

                /*
                 * We know this module_get will succeed because the
                 * listener holds a reference too
                 */
                __module_get(xprt->xpt_class->xcl_owner);
                svc_check_conn_limits(xprt->xpt_server);
                newxpt = xprt->xpt_ops->xpo_accept(xprt);
                if (newxpt)
                        svc_add_new_temp_xprt(serv, newxpt);
                else
                        module_put(xprt->xpt_class->xcl_owner);
        } else if (svc_xprt_reserve_slot(rqstp, xprt)) {
                /* XPT_DATA|XPT_DEFERRED case: */
                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
                        rqstp, rqstp->rq_pool->sp_id, xprt,
                        kref_read(&xprt->xpt_ref));
                rqstp->rq_deferred = svc_deferred_dequeue(xprt);
                if (rqstp->rq_deferred)
                        len = svc_deferred_recv(rqstp);
                else
                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                dprintk("svc: got len=%d\n", len);
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
        }
        /* clear XPT_BUSY: */
        svc_xprt_received(xprt);
out:
        trace_svc_handle_xprt(xprt, len);
        return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
        struct svc_xprt *xprt = NULL;
        struct svc_serv *serv = rqstp->rq_server;
        int len, err;

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_xprt)
                printk(KERN_ERR
                       "svc_recv: service %p, transport not NULL!\n",
                       rqstp);

        err = svc_alloc_arg(rqstp);
        if (err)
                goto out;

        try_to_freeze();
        cond_resched();
        err = -EINTR;
        if (signalled() || kthread_should_stop())
                goto out;

        xprt = svc_get_next_xprt(rqstp, timeout);
        if (IS_ERR(xprt)) {
                err = PTR_ERR(xprt);
                goto out;
        }

        len = svc_handle_xprt(rqstp, xprt);

        /* No data, incomplete (TCP) read, or accept() */
        err = -EAGAIN;
        if (len <= 0)
                goto out_release;

        clear_bit(XPT_OLD, &xprt->xpt_flags);

        if (xprt->xpt_ops->xpo_secure_port(rqstp))
                set_bit(RQ_SECURE, &rqstp->rq_flags);
        else
                clear_bit(RQ_SECURE, &rqstp->rq_flags);
        rqstp->rq_chandle.defer = svc_defer;
        rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        trace_svc_recv(rqstp, len);
        return len;
out_release:
        rqstp->rq_res.len = 0;
        svc_xprt_release(rqstp);
out:
        trace_svc_recv(rqstp, err);
        return err;
}
EXPORT_SYMBOL_GPL(svc_recv);
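
/*
 * For context, a sketch of how a service thread drives svc_recv(); this
 * mirrors the receive half of loops like nfsd() or lockd(), simplified
 * ("my_svc_thread" is a hypothetical name):
 *
 *	static int my_svc_thread(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *
 *		while (!kthread_should_stop()) {
 *			int err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
 *
 *			if (err == -EINTR)
 *				break;		   (shutting down)
 *			if (err == -EAGAIN)
 *				continue;	   (nothing usable this time)
 *			svc_process(rqstp);	   (decode, dispatch, reply)
 *		}
 *		return 0;
 *	}
 */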

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
        trace_svc_drop(rqstp);
        dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
        svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt;
        int len = -EFAULT;
        struct xdr_buf *xb;

        xprt = rqstp->rq_xprt;
        if (!xprt)
                goto out;

        /* release the receive skb before sending the reply */
        rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab mutex to serialize outgoing data. */
        mutex_lock(&xprt->xpt_mutex);
        if (test_bit(XPT_DEAD, &xprt->xpt_flags)
                        || test_bit(XPT_CLOSE, &xprt->xpt_flags))
                len = -ENOTCONN;
        else
                len = xprt->xpt_ops->xpo_sendto(rqstp);
        mutex_unlock(&xprt->xpt_mutex);
        rpc_wake_up(&xprt->xpt_bc_pending);
        svc_xprt_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                len = 0;
out:
        trace_svc_send(rqstp, len);
        return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
        struct svc_serv *serv = (struct svc_serv *)closure;
        struct svc_xprt *xprt;
        struct list_head *le, *next;

        dprintk("svc_age_temp_xprts\n");

        if (!spin_trylock_bh(&serv->sv_lock)) {
                /* busy, try again 1 sec later */
                dprintk("svc_age_temp_xprts: busy\n");
                mod_timer(&serv->sv_temptimer, jiffies + HZ);
                return;
        }

        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                xprt = list_entry(le, struct svc_xprt, xpt_list);

                /* First time through, just mark it OLD. Second time
                 * through, close it. */
                if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
                        continue;
                if (kref_read(&xprt->xpt_ref) > 1 ||
                    test_bit(XPT_BUSY, &xprt->xpt_flags))
                        continue;
                list_del_init(le);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                dprintk("queuing xprt %p for closing\n", xprt);

                /* a thread will dequeue and close it soon */
                svc_xprt_enqueue(xprt);
        }
        spin_unlock_bh(&serv->sv_lock);

        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
        struct svc_xprt *xprt;
        struct list_head *le, *next;
        LIST_HEAD(to_be_closed);

        spin_lock_bh(&serv->sv_lock);
        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                xprt = list_entry(le, struct svc_xprt, xpt_list);
                if (rpc_cmp_addr(server_addr, (struct sockaddr *)
                                 &xprt->xpt_local)) {
                        dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
                        list_move(le, &to_be_closed);
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        while (!list_empty(&to_be_closed)) {
                le = to_be_closed.next;
                list_del_init(le);
                xprt = list_entry(le, struct svc_xprt, xpt_list);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
                dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
                        xprt);
                svc_xprt_enqueue(xprt);
        }
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
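
/*
 * An illustrative sketch of the intended caller: an inetaddr notifier that
 * closes connections bound to an address being removed. The "my_*" names
 * are hypothetical; nfsd registers a similar notifier:
 *
 *	static int my_inetaddr_event(struct notifier_block *this,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct in_ifaddr *ifa = ptr;
 *		struct sockaddr_in sin = {
 *			.sin_family	 = AF_INET,
 *			.sin_addr.s_addr = ifa->ifa_local,
 *		};
 *
 *		if (event == NETDEV_DOWN)
 *			svc_age_temp_xprts_now(my_serv,
 *					       (struct sockaddr *)&sin);
 *		return NOTIFY_DONE;
 *	}
 */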

static void call_xpt_users(struct svc_xprt *xprt)
{
        struct svc_xpt_user *u;

        spin_lock(&xprt->xpt_lock);
        while (!list_empty(&xprt->xpt_users)) {
                u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
                list_del(&u->list);
                u->callback(u);
        }
        spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
        struct svc_serv *serv = xprt->xpt_server;
        struct svc_deferred_req *dr;

        /* Only do this once */
        if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
                BUG();

        dprintk("svc: svc_delete_xprt(%p)\n", xprt);
        xprt->xpt_ops->xpo_detach(xprt);

        spin_lock_bh(&serv->sv_lock);
        list_del_init(&xprt->xpt_list);
        WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
        if (test_bit(XPT_TEMP, &xprt->xpt_flags))
                serv->sv_tmpcnt--;
        spin_unlock_bh(&serv->sv_lock);

        while ((dr = svc_deferred_dequeue(xprt)) != NULL)
                kfree(dr);

        call_xpt_users(xprt);
        svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                /* someone else will have to effect the close */
                return;
        /*
         * We expect svc_close_xprt() to work even when no threads are
         * running (e.g., while configuring the server before starting
         * any threads), so if the transport isn't busy, we delete
         * it ourself:
         */
        svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
        struct svc_xprt *xprt;
        int ret = 0;

        spin_lock(&serv->sv_lock);
        list_for_each_entry(xprt, xprt_list, xpt_list) {
                if (xprt->xpt_net != net)
                        continue;
                ret++;
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
        }
        spin_unlock(&serv->sv_lock);
        return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
        struct svc_pool *pool;
        struct svc_xprt *xprt;
        struct svc_xprt *tmp;
        int i;

        for (i = 0; i < serv->sv_nrpools; i++) {
                pool = &serv->sv_pools[i];

                spin_lock_bh(&pool->sp_lock);
                list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
                        if (xprt->xpt_net != net)
                                continue;
                        list_del_init(&xprt->xpt_ready);
                        spin_unlock_bh(&pool->sp_lock);
                        return xprt;
                }
                spin_unlock_bh(&pool->sp_lock);
        }
        return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
        struct svc_xprt *xprt;

        while ((xprt = svc_dequeue_net(serv, net))) {
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_delete_xprt(xprt);
        }
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close. In the case where there are no such threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
        int delay = 0;

        while (svc_close_list(serv, &serv->sv_permsocks, net) +
               svc_close_list(serv, &serv->sv_tempsocks, net)) {

                svc_clean_up_xprts(serv, net);
                msleep(delay++);
        }
}

/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr =
                container_of(dreq, struct svc_deferred_req, handle);
        struct svc_xprt *xprt = dr->xprt;

        spin_lock(&xprt->xpt_lock);
        set_bit(XPT_DEFERRED, &xprt->xpt_flags);
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
                dprintk("revisit canceled\n");
                svc_xprt_put(xprt);
                trace_svc_drop_deferred(dr);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        dr->xprt = NULL;
        list_add(&dr->handle.recent, &xprt->xpt_deferred);
        spin_unlock(&xprt->xpt_lock);
        svc_xprt_enqueue(xprt);
        svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                size_t skip;
                size_t size;

                /* FIXME maybe discard if size too large */
                size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                dr->xprt_hlen = rqstp->rq_xprt_hlen;

                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
        set_bit(RQ_DROPME, &rqstp->rq_flags);

        dr->handle.revisit = svc_revisit;
        trace_svc_defer(rqstp);
        return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        /* setup iov_base past transport header */
        rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
        /* The iov_len does not include the transport header bytes */
        rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
        rqstp->rq_arg.page_len = 0;
        /* The rq_arg.len includes the transport header bytes */
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot = dr->prot;
        memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
        rqstp->rq_addrlen = dr->addrlen;
        /* Save off transport header len in case we get deferred again */
        rqstp->rq_xprt_hlen = dr->xprt_hlen;
        rqstp->rq_daddr = dr->daddr;
        rqstp->rq_respages = rqstp->rq_pages;
        return (dr->argslen<<2) - dr->xprt_hlen;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
        struct svc_deferred_req *dr = NULL;

        if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
                return NULL;
        spin_lock(&xprt->xpt_lock);
        if (!list_empty(&xprt->xpt_deferred)) {
                dr = list_entry(xprt->xpt_deferred.next,
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
                trace_svc_revisit_deferred(dr);
        } else
                clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
        spin_unlock(&xprt->xpt_lock);
        return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
                               struct net *net, const sa_family_t af,
                               const unsigned short port)
{
        struct svc_xprt *xprt;
        struct svc_xprt *found = NULL;

        /* Sanity check the args */
        if (serv == NULL || xcl_name == NULL)
                return found;

        spin_lock_bh(&serv->sv_lock);
        list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
                if (xprt->xpt_net != net)
                        continue;
                if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
                        continue;
                if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
                        continue;
                if (port != 0 && port != svc_xprt_local_port(xprt))
                        continue;
                found = xprt;
                svc_xprt_get(xprt);
                break;
        }
        spin_unlock_bh(&serv->sv_lock);
        return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
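
/*
 * Usage sketch: find the first permanent "tcp" endpoint of a service,
 * regardless of family or port, and remember to drop the reference that
 * svc_find_xprt() took:
 *
 *	struct svc_xprt *xprt;
 *
 *	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
 *	if (xprt) {
 *		... use xprt ...
 *		svc_xprt_put(xprt);
 *	}
 */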

static int svc_one_xprt_name(const struct svc_xprt *xprt,
                             char *pos, int remaining)
{
        int len;

        len = snprintf(pos, remaining, "%s %u\n",
                       xprt->xpt_class->xcl_name,
                       svc_xprt_local_port(xprt));

        if (len >= remaining)
                return -ENAMETOOLONG;
        return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
        struct svc_xprt *xprt;
        int len, totlen;
        char *pos;

        /* Sanity check args */
        if (!serv)
                return 0;

        spin_lock_bh(&serv->sv_lock);

        pos = buf;
        totlen = 0;
        list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
                len = svc_one_xprt_name(xprt, pos, buflen - totlen);
                if (len < 0) {
                        *buf = '\0';
                        totlen = len;
                }
                if (len <= 0)
                        break;

                pos += len;
                totlen += len;
        }

        spin_unlock_bh(&serv->sv_lock);
        return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);

/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
        unsigned int pidx = (unsigned int)*pos;
        struct svc_serv *serv = m->private;

        dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

        if (!pidx)
                return SEQ_START_TOKEN;
        return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct svc_pool *pool = p;
        struct svc_serv *serv = m->private;

        dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

        if (p == SEQ_START_TOKEN) {
                pool = &serv->sv_pools[0];
        } else {
                unsigned int pidx = (pool - &serv->sv_pools[0]);
                if (pidx < serv->sv_nrpools-1)
                        pool = &serv->sv_pools[pidx+1];
                else
                        pool = NULL;
        }
        ++*pos;
        return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
        struct svc_pool *pool = p;

        if (p == SEQ_START_TOKEN) {
                seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
                return 0;
        }

        seq_printf(m, "%u %lu %lu %lu %lu\n",
                pool->sp_id,
                (unsigned long)atomic_long_read(&pool->sp_stats.packets),
                pool->sp_stats.sockets_queued,
                (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
                (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

        return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
        .start	= svc_pool_stats_start,
        .next	= svc_pool_stats_next,
        .stop	= svc_pool_stats_stop,
        .show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
        int err;

        err = seq_open(file, &svc_pool_stats_seq_ops);
        if (!err)
                ((struct seq_file *) file->private_data)->private = serv;
        return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);
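
/*
 * A sketch of how a service might expose these stats: wire
 * svc_pool_stats_open() into a file_operations .open method, much as nfsd
 * does for its pool_stats file ("my_serv" and the "my_*" names are
 * hypothetical, and a real user may need a custom .release):
 *
 *	static int my_pool_stats_open(struct inode *inode, struct file *file)
 *	{
 *		return svc_pool_stats_open(my_serv, file);
 *	}
 *
 *	static const struct file_operations my_pool_stats_fops = {
 *		.open		= my_pool_stats_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */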

/*----------------------------------------------------------------------------*/