
/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);
static void svc_xprt_do_enqueue(struct svc_xprt *xprt);

/* Apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes:
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	When both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued more than once. During normal transport processing this
 *	bit is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */

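/*
 * A minimal sketch of the XPT_DATA rule above, as a transport provider
 * might apply it (hedged: "my_data_ready" is a hypothetical callback;
 * the real svcsock callbacks follow the same shape but go through
 * struct svc_sock):
 *
 *	static void my_data_ready(struct sock *sk)
 *	{
 *		struct svc_xprt *xprt = sk->sk_user_data;
 *
 *		if (xprt) {
 *			set_bit(XPT_DATA, &xprt->xpt_flags);
 *			svc_xprt_enqueue(xprt);
 *		}
 *	}
 */
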
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

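/*
 * A usage sketch (hedged): a transport module typically registers its
 * class from module init and unregisters it on exit. "my_xprt_class"
 * below is an assumed, provider-defined svc_xprt_class, not something
 * declared in this file:
 *
 *	static int __init my_xprt_init(void)
 *	{
 *		return svc_reg_xprt_class(&my_xprt_class);
 *	}
 *
 *	static void __exit my_xprt_exit(void)
 *	{
 *		svc_unreg_xprt_class(&my_xprt_class);
 *	}
 */
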
void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);

	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;

	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport-independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

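/*
 * A sketch of the expected caller (hedged): a class's xpo_create method
 * allocates its private transport structure, embeds a struct svc_xprt
 * in it, and initializes that embedded struct with svc_xprt_init().
 * "my_xprt" and "my_xprt_class" are assumed names for illustration:
 *
 *	static struct svc_xprt *my_xpo_create(struct svc_serv *serv,
 *					      struct net *net,
 *					      struct sockaddr *sa,
 *					      int salen, int flags)
 *	{
 *		struct my_xprt *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (!p)
 *			return ERR_PTR(-ENOMEM);
 *		svc_xprt_init(net, &my_xprt_class, &p->xprt, serv);
 *		...bind/listen on sa, then...
 *		return &p->xprt;
 *	}
 */
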
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_do_enqueue(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);

	/* This errno is exposed to user space. Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

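/*
 * A usage sketch (hedged): a service such as nfsd sets up its listeners
 * at configuration time with something like
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, port,
 *			      SVC_SOCK_DEFAULTS);
 *
 * where "tcp" must match the xcl_name of a registered class and the
 * return value, if positive, is the bound local port.
 */
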
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread. Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread. Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
		return xprt->xpt_ops->xpo_has_wspace(xprt);
	return false;
}

static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;

	if (!svc_xprt_has_something_to_do(xprt))
		return;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		return;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	spin_lock_bh(&pool->sp_lock);

	pool->sp_stats.packets++;

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		/* Note the order of the following 3 lines:
		 * We want to assign xprt to rqstp->rq_xprt only _after_
		 * we've woken up the process, so that we don't race with
		 * the lockless check in svc_get_next_xprt().
		 */
		svc_xprt_get(xprt);
		wake_up_process(rqstp->rq_task);
		rqstp->rq_xprt = xprt;
		pool->sp_stats.threads_woken++;
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
	}

	spin_unlock_bh(&pool->sp_lock);
	put_cpu();
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	svc_xprt_do_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport. Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp: The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		if (xprt->xpt_ops->xpo_adjust_wspace)
			xprt->xpt_ops->xpo_adjust_wspace(xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

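/*
 * A usage sketch (hedged): once a service knows its reply will be
 * small, it can shrink its reservation so the transport's write space
 * frees up for other requests, e.g. for a reply needing at most a few
 * hundred bytes beyond the head:
 *
 *	svc_reserve(rqstp, 512);
 */
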
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up_process(rqstp->rq_task);
		} else
			pool->sp_task_pending = 1;
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. An NFS client does one reconnect every 15 seconds; an
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;

		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not
			 * fair, but such is life.
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages. If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
	if (pages >= RPCSVC_MAXPAGES)
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES - 1;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);

			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	return 0;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	spin_lock_bh(&pool->sp_lock);
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		pool->sp_task_pending = 0;
	} else {
		if (pool->sp_task_pending) {
			pool->sp_task_pending = 0;
			xprt = ERR_PTR(-EAGAIN);
			goto out;
		}
		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);
		spin_unlock_bh(&pool->sp_lock);

		if (!(signalled() || kthread_should_stop())) {
			time_left = schedule_timeout(timeout);
			__set_current_state(TASK_RUNNING);

			try_to_freeze();

			xprt = rqstp->rq_xprt;
			if (xprt != NULL)
				return xprt;
		} else
			__set_current_state(TASK_RUNNING);

		spin_lock_bh(&pool->sp_lock);
		if (!time_left)
			pool->sp_stats.threads_timedout++;

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			if (signalled() || kthread_should_stop())
				return ERR_PTR(-EINTR);
			else
				return ERR_PTR(-EAGAIN);
		}
	}
out:
	spin_unlock_bh(&pool->sp_lock);
	return xprt;
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;

	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		return 0;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;

		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
	return len;
}

/*
 * Receive the next request on any transport. This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		return err;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	if (len <= 0)
		goto out;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = xprt->xpt_ops->xpo_secure_port(rqstp);
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
out:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(svc_recv);

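/*
 * A sketch of the expected caller (hedged): a service thread's main
 * loop alternates svc_recv() with request dispatch, roughly as nfsd
 * and lockd do; error handling is abbreviated for illustration:
 *
 *	while (!kthread_should_stop()) {
 *		int err = svc_recv(rqstp, 60*60*HZ);
 *
 *		if (err == -EINTR)
 *			break;
 *		if (err == -EAGAIN)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */
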
/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

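/*
 * Note (hedged): services normally do not call svc_send() directly;
 * svc_process() invokes it once the dispatch routine has built the
 * reply in rqstp->rq_res. The xpt_mutex above is what serializes
 * concurrent replies on a shared (e.g. TCP) transport.
 */
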
/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close. When there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop; when there are other threads, we may need to wait a little
 * while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;

		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	rqstp->rq_dropme = true;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;

	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

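/*
 * A usage sketch (hedged): a caller that wants the service's TCP
 * listener regardless of address family or port might do
 *
 *	struct svc_xprt *xprt = svc_find_xprt(serv, "tcp", net, 0, 0);
 *
 *	if (xprt) {
 *		...use xprt...
 *		svc_xprt_put(xprt);
 *	}
 *
 * noting that a successful lookup returns a referenced transport that
 * must be released with svc_xprt_put().
 */
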
static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;
		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);

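/*
 * Note (hedged): this is the sort of helper a service's administrative
 * interface calls to render a "one transport per line" listing, e.g.
 * for a file like nfsd's /proc/fs/nfsd/portlist; each line is
 * "<class-name> <port>\n" as produced by svc_one_xprt_name() above.
 */
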
/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		pool->sp_stats.packets,
		pool->sp_stats.sockets_queued,
		pool->sp_stats.threads_woken,
		pool->sp_stats.threads_timedout);

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

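/*
 * A wiring sketch (hedged): a service exposes these stats by pointing a
 * procfs (or similar) open handler at svc_pool_stats_open(), roughly:
 *
 *	static int my_pool_stats_open(struct inode *inode, struct file *file)
 *	{
 *		return svc_pool_stats_open(my_serv, file);
 *	}
 *
 * with release handled by seq_release(). "my_pool_stats_open" and
 * "my_serv" are assumed names; nfsd's pool_stats file follows this
 * pattern.
 */
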
/*----------------------------------------------------------------------------*/